Package gluon :: Module dal
[hide private]
[frames] | [no frames]

Source Code for Module gluon.dal

    1  #!/bin/env python 
    2  # -*- coding: utf-8 -*- 
    3   
    4  """ 
    5  This file is part of the web2py Web Framework 
    6  Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu> 
    7  License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html) 
    8   
    9  Thanks to 
   10      * Niall Sweeny <niall.sweeny@fonjax.com> for MS SQL support 
   11      * Marcel Leuthi <mluethi@mlsystems.ch> for Oracle support 
   12      * Denes 
   13      * Chris Clark 
   14      * clach05 
   15      * Denes Lengyel 
   16      * and many others who have contributed to current and previous versions 
   17   
   18  This file contains the DAL support for many relational databases, 
   19  including: 
   20  - SQLite & SpatiaLite 
   21  - MySQL 
   22  - Postgres 
   23  - Firebird 
   24  - Oracle 
   25  - MS SQL 
   26  - DB2 
   27  - Interbase 
   28  - Ingres 
   29  - Informix (9+ and SE) 
   30  - SapDB (experimental) 
   31  - Cubrid (experimental) 
   32  - CouchDB (experimental) 
   33  - MongoDB (in progress) 
   34  - Google:nosql 
   35  - Google:sql 
   36  - Teradata 
   37  - IMAP (experimental) 
   38   
   39  Example of usage: 
   40   
   41  >>> # from dal import DAL, Field 
   42   
   43  ### create DAL connection (and create DB if it doesn't exist) 
   44  >>> db = DAL(('sqlite://storage.sqlite','mysql://a:b@localhost/x'), 
   45  ... folder=None) 
   46   
   47  ### define a table 'person' (create/alter as necessary) 
   48  >>> person = db.define_table('person',Field('name','string')) 
   49   
   50  ### insert a record 
   51  >>> id = person.insert(name='James') 
   52   
   53  ### retrieve it by id 
   54  >>> james = person(id) 
   55   
   56  ### retrieve it by name 
   57  >>> james = person(name='James') 
   58   
   59  ### retrieve it by arbitrary query 
   60  >>> query = (person.name=='James') & (person.name.startswith('J')) 
   61  >>> james = db(query).select(person.ALL)[0] 
   62   
   63  ### update one record 
   64  >>> james.update_record(name='Jim') 
   65  <Row {'id': 1, 'name': 'Jim'}> 
   66   
   67  ### update multiple records by query 
   68  >>> db(person.name.like('J%')).update(name='James') 
   69  1 
   70   
   71  ### delete records by query 
   72  >>> db(person.name.lower() == 'jim').delete() 
   73  0 
   74   
   75  ### retrieve multiple records (rows) 
   76  >>> people = db(person).select(orderby=person.name, 
   77  ... groupby=person.name, limitby=(0,100)) 
   78   
   79  ### further filter them 
   80  >>> james = people.find(lambda row: row.name == 'James').first() 
   81  >>> print james.id, james.name 
   82  1 James 
   83   
   84  ### check aggregates 
   85  >>> counter = person.id.count() 
   86  >>> print db(person).select(counter).first()(counter) 
   87  1 
   88   
   89  ### delete one record 
   90  >>> james.delete_record() 
   91  1 
   92   
   93  ### delete (drop) entire database table 
   94  >>> person.drop() 
   95   
   96  Supported field types: 
   97  id string text boolean integer double decimal password upload 
   98  blob time date datetime 
   99   
  100  Supported DAL URI strings: 
  101  'sqlite://test.db' 
  102  'spatialite://test.db' 
  103  'sqlite:memory' 
  104  'spatialite:memory' 
  105  'jdbc:sqlite://test.db' 
  106  'mysql://root:none@localhost/test' 
  107  'postgres://mdipierro:password@localhost/test' 
  108  'postgres:psycopg2://mdipierro:password@localhost/test' 
  109  'postgres:pg8000://mdipierro:password@localhost/test' 
  110  'jdbc:postgres://mdipierro:none@localhost/test' 
  111  'mssql://web2py:none@A64X2/web2py_test' 
  112  'mssql2://web2py:none@A64X2/web2py_test' # alternate mappings 
  113  'oracle://username:password@database' 
  114  'firebird://user:password@server:3050/database' 
  115  'db2://DSN=dsn;UID=user;PWD=pass' 
  116  'firebird://username:password@hostname/database' 
  117  'firebird_embedded://username:password@c://path' 
  118  'informix://user:password@server:3050/database' 
  119  'informixu://user:password@server:3050/database' # unicode informix 
  120  'ingres://database'  # or use an ODBC connection string, e.g. 'ingres://dsn=dsn_name' 
  121  'google:datastore' # for google app engine datastore 
  122  'google:sql' # for google app engine with sql (mysql compatible) 
  123  'teradata://DSN=dsn;UID=user;PWD=pass; DATABASE=database' # experimental 
  124  'imap://user:password@server:port' # experimental 
  125  'mongodb://user:password@server:port/database' # experimental 
  126   
  127  For more info: 
  128  help(DAL) 
  129  help(Field) 
  130  """ 
  131   
  132  ################################################################################### 
  133  # this file only exposes DAL and Field 
  134  ################################################################################### 
  135   
__all__ = ['DAL', 'Field']

# default column lengths (in characters) per DAL field type
DEFAULTLENGTH = {'string':512,
                 'password':512,
                 'upload':512,
                 'text':2**15,
                 'blob':2**31}
# maximum number of entries kept in the timings log
# NOTE(review): not referenced in this chunk — confirm usage elsewhere
TIMINGSSIZE = 100
# per-platform shared-library name used to load the SpatiaLite extension
SPATIALLIBS = {
    'Windows':'libspatialite',
    'Linux':'libspatialite.so',
    'Darwin':'libspatialite.dylib'
    }
# fallback connection URI, presumably used when no URI is given — verify against DAL()
DEFAULT_URI = 'sqlite://dummy.db'
  150   
  151  import re 
  152  import sys 
  153  import locale 
  154  import os 
  155  import types 
  156  import datetime 
  157  import threading 
  158  import time 
  159  import csv 
  160  import cgi 
  161  import copy 
  162  import socket 
  163  import logging 
  164  import base64 
  165  import shutil 
  166  import marshal 
  167  import decimal 
  168  import struct 
  169  import urllib 
  170  import hashlib 
  171  import uuid 
  172  import glob 
  173  import traceback 
  174  import platform 
  175   
  176  PYTHON_VERSION = sys.version_info[0] 
  177  if PYTHON_VERSION == 2: 
  178      import cPickle as pickle 
  179      import cStringIO as StringIO 
  180      import copy_reg as copyreg 
  181      hashlib_md5 = hashlib.md5 
  182      bytes, unicode = str, unicode 
  183  else: 
  184      import pickle 
  185      from io import StringIO as StringIO 
  186      import copyreg 
  187      long = int 
  188      hashlib_md5 = lambda s: hashlib.md5(bytes(s,'utf8')) 
  189      bytes, unicode = bytes, str 
  190   
# every Python callable type; used to detect lazy/computed values
CALLABLETYPES = (types.LambdaType, types.FunctionType,
                 types.BuiltinFunctionType,
                 types.MethodType, types.BuiltinMethodType)

# keyword arguments recognized by table definition
# (presumably validated in define_table — confirm)
TABLE_ARGS = set(
    ('migrate','primarykey','fake_migrate','format','redefine',
     'singular','plural','trigger_name','sequence_name','fields',
     'common_filter','polymodel','table_class','on_define','actual_name'))

# keyword arguments recognized by select()
SELECT_ARGS = set(
    ('orderby', 'groupby', 'limitby','required', 'cache', 'left',
     'distinct', 'having', 'join','for_update', 'processor','cacheable', 'orderby_on_limitby'))

# short aliases for frequently used builtins / os.path helpers
ogetattr = object.__getattribute__
osetattr = object.__setattr__
exists = os.path.exists
pjoin = os.path.join
  208   
  209  ################################################################################### 
  210  # following checks allow the use of dal without web2py, as a standalone module 
  211  ################################################################################### 
  212  try: 
  213      from gluon.utils import web2py_uuid 
  214  except (ImportError, SystemError): 
  215      import uuid 
216 - def web2py_uuid(): return str(uuid.uuid4())
217 218 try: 219 import portalocker 220 have_portalocker = True 221 except ImportError: 222 have_portalocker = False 223 224 try: 225 from gluon import serializers 226 have_serializers = True 227 except ImportError: 228 have_serializers = False 229 try: 230 import json as simplejson 231 except ImportError: 232 try: 233 import gluon.contrib.simplejson as simplejson 234 except ImportError: 235 simplejson = None 236 237 LOGGER = logging.getLogger("web2py.dal") 238 DEFAULT = lambda:0 239 240 GLOBAL_LOCKER = threading.RLock() 241 THREAD_LOCAL = threading.local() 242 243 # internal representation of tables with field 244 # <table>.<field>, tables and fields may only be [a-zA-Z0-9_] 245 246 REGEX_TYPE = re.compile('^([\w\_\:]+)') 247 REGEX_DBNAME = re.compile('^(\w+)(\:\w+)*') 248 REGEX_W = re.compile('^\w+$') 249 REGEX_TABLE_DOT_FIELD = re.compile('^(\w+)\.(\w+)$') 250 REGEX_UPLOAD_PATTERN = re.compile('(?P<table>[\w\-]+)\.(?P<field>[\w\-]+)\.(?P<uuidkey>[\w\-]+)(\.(?P<name>\w+))?\.\w+$') 251 REGEX_CLEANUP_FN = re.compile('[\'"\s;]+') 252 REGEX_UNPACK = re.compile('(?<!\|)\|(?!\|)') 253 REGEX_PYTHON_KEYWORDS = re.compile('^(and|del|from|not|while|as|elif|global|or|with|assert|else|if|pass|yield|break|except|import|print|class|exec|in|raise|continue|finally|is|return|def|for|lambda|try)$') 254 REGEX_SELECT_AS_PARSER = re.compile("\s+AS\s+(\S+)") 255 REGEX_CONST_STRING = re.compile('(\"[^\"]*?\")|(\'[^\']*?\')') 256 REGEX_SEARCH_PATTERN = re.compile('^{[^\.]+\.[^\.]+(\.(lt|gt|le|ge|eq|ne|contains|startswith|year|month|day|hour|minute|second))?(\.not)?}$') 257 REGEX_SQUARE_BRACKETS = re.compile('^.+\[.+\]$') 258 REGEX_STORE_PATTERN = re.compile('\.(?P<e>\w{1,5})$') 259 REGEX_QUOTES = re.compile("'[^']*'") 260 REGEX_ALPHANUMERIC = re.compile('^[0-9a-zA-Z]\w*$') 261 REGEX_PASSWORD = re.compile('\://([^:@]*)\:') 262 REGEX_NOPASSWD = re.compile('\/\/[\w\.\-]+[\:\/](.+)(?=@)') # was '(?<=[\:\/])([^:@/]+)(?=@.+)' 263 264 # list of drivers will be built on the fly 265 # and lists 
only what is available 266 DRIVERS = [] 267 268 try: 269 from new import classobj 270 from google.appengine.ext import db as gae 271 from google.appengine.api import namespace_manager, rdbms 272 from google.appengine.api.datastore_types import Key ### for belongs on ID 273 from google.appengine.ext.db.polymodel import PolyModel 274 DRIVERS.append('google') 275 except ImportError: 276 pass 277 278 if not 'google' in DRIVERS: 279 280 try: 281 from pysqlite2 import dbapi2 as sqlite2 282 DRIVERS.append('SQLite(sqlite2)') 283 except ImportError: 284 LOGGER.debug('no SQLite drivers pysqlite2.dbapi2') 285 286 try: 287 from sqlite3 import dbapi2 as sqlite3 288 DRIVERS.append('SQLite(sqlite3)') 289 except ImportError: 290 LOGGER.debug('no SQLite drivers sqlite3') 291 292 try: 293 # first try contrib driver, then from site-packages (if installed) 294 try: 295 import gluon.contrib.pymysql as pymysql 296 # monkeypatch pymysql because they havent fixed the bug: 297 # https://github.com/petehunt/PyMySQL/issues/86 298 pymysql.ESCAPE_REGEX = re.compile("'") 299 pymysql.ESCAPE_MAP = {"'": "''"} 300 # end monkeypatch 301 except ImportError: 302 import pymysql 303 DRIVERS.append('MySQL(pymysql)') 304 except ImportError: 305 LOGGER.debug('no MySQL driver pymysql') 306 307 try: 308 import MySQLdb 309 DRIVERS.append('MySQL(MySQLdb)') 310 except ImportError: 311 LOGGER.debug('no MySQL driver MySQLDB') 312 313 try: 314 import mysql.connector as mysqlconnector 315 DRIVERS.append("MySQL(mysqlconnector)") 316 except ImportError: 317 LOGGER.debug("no driver mysql.connector") 318 319 try: 320 import psycopg2 321 from psycopg2.extensions import adapt as psycopg2_adapt 322 DRIVERS.append('PostgreSQL(psycopg2)') 323 except ImportError: 324 LOGGER.debug('no PostgreSQL driver psycopg2') 325 326 try: 327 # first try contrib driver, then from site-packages (if installed) 328 try: 329 import gluon.contrib.pg8000.dbapi as pg8000 330 except ImportError: 331 import pg8000.dbapi as pg8000 332 
DRIVERS.append('PostgreSQL(pg8000)') 333 except ImportError: 334 LOGGER.debug('no PostgreSQL driver pg8000') 335 336 try: 337 import cx_Oracle 338 DRIVERS.append('Oracle(cx_Oracle)') 339 except ImportError: 340 LOGGER.debug('no Oracle driver cx_Oracle') 341 342 try: 343 try: 344 import pyodbc 345 except ImportError: 346 try: 347 import gluon.contrib.pypyodbc as pyodbc 348 except Exception, e: 349 raise ImportError(str(e)) 350 DRIVERS.append('MSSQL(pyodbc)') 351 DRIVERS.append('DB2(pyodbc)') 352 DRIVERS.append('Teradata(pyodbc)') 353 DRIVERS.append('Ingres(pyodbc)') 354 except ImportError: 355 LOGGER.debug('no MSSQL/DB2/Teradata/Ingres driver pyodbc') 356 357 try: 358 import Sybase 359 DRIVERS.append('Sybase(Sybase)') 360 except ImportError: 361 LOGGER.debug('no Sybase driver') 362 363 try: 364 import kinterbasdb 365 DRIVERS.append('Interbase(kinterbasdb)') 366 DRIVERS.append('Firebird(kinterbasdb)') 367 except ImportError: 368 LOGGER.debug('no Firebird/Interbase driver kinterbasdb') 369 370 try: 371 import fdb 372 DRIVERS.append('Firebird(fdb)') 373 except ImportError: 374 LOGGER.debug('no Firebird driver fdb') 375 ##### 376 try: 377 import firebirdsql 378 DRIVERS.append('Firebird(firebirdsql)') 379 except ImportError: 380 LOGGER.debug('no Firebird driver firebirdsql') 381 382 try: 383 import informixdb 384 DRIVERS.append('Informix(informixdb)') 385 LOGGER.warning('Informix support is experimental') 386 except ImportError: 387 LOGGER.debug('no Informix driver informixdb') 388 389 try: 390 import sapdb 391 DRIVERS.append('SQL(sapdb)') 392 LOGGER.warning('SAPDB support is experimental') 393 except ImportError: 394 LOGGER.debug('no SAP driver sapdb') 395 396 try: 397 import cubriddb 398 DRIVERS.append('Cubrid(cubriddb)') 399 LOGGER.warning('Cubrid support is experimental') 400 except ImportError: 401 LOGGER.debug('no Cubrid driver cubriddb') 402 403 try: 404 from com.ziclix.python.sql import zxJDBC 405 import java.sql 406 # Try sqlite jdbc driver from 
http://www.zentus.com/sqlitejdbc/ 407 from org.sqlite import JDBC # required by java.sql; ensure we have it 408 zxJDBC_sqlite = java.sql.DriverManager 409 DRIVERS.append('PostgreSQL(zxJDBC)') 410 DRIVERS.append('SQLite(zxJDBC)') 411 LOGGER.warning('zxJDBC support is experimental') 412 is_jdbc = True 413 except ImportError: 414 LOGGER.debug('no SQLite/PostgreSQL driver zxJDBC') 415 is_jdbc = False 416 417 try: 418 import couchdb 419 DRIVERS.append('CouchDB(couchdb)') 420 except ImportError: 421 LOGGER.debug('no Couchdb driver couchdb') 422 423 try: 424 import pymongo 425 DRIVERS.append('MongoDB(pymongo)') 426 except: 427 LOGGER.debug('no MongoDB driver pymongo') 428 429 try: 430 import imaplib 431 DRIVERS.append('IMAP(imaplib)') 432 except: 433 LOGGER.debug('no IMAP driver imaplib') 434 435 PLURALIZE_RULES = [ 436 (re.compile('child$'), re.compile('child$'), 'children'), 437 (re.compile('oot$'), re.compile('oot$'), 'eet'), 438 (re.compile('ooth$'), re.compile('ooth$'), 'eeth'), 439 (re.compile('l[eo]af$'), re.compile('l([eo])af$'), 'l\\1aves'), 440 (re.compile('sis$'), re.compile('sis$'), 'ses'), 441 (re.compile('man$'), re.compile('man$'), 'men'), 442 (re.compile('ife$'), re.compile('ife$'), 'ives'), 443 (re.compile('eau$'), re.compile('eau$'), 'eaux'), 444 (re.compile('lf$'), re.compile('lf$'), 'lves'), 445 (re.compile('[sxz]$'), re.compile('$'), 'es'), 446 (re.compile('[^aeioudgkprt]h$'), re.compile('$'), 'es'), 447 (re.compile('(qu|[^aeiou])y$'), re.compile('y$'), 'ies'), 448 (re.compile('$'), re.compile('$'), 's'), 449 ]
def pluralize(singular, rules=None):
    """Return the plural form of *singular*.

    rules: list of ``(search_regex, sub_regex, replacement)`` triples,
        tried in order; the first triple whose search pattern matches
        (and whose substitution yields a non-empty result) wins.
        Defaults to the module-level PLURALIZE_RULES table, whose last
        rule matches everything (append 's').

    The previous signature used the PLURALIZE_RULES list itself as a
    mutable default argument; it now defaults to None and falls back at
    call time.
    """
    if rules is None:
        rules = PLURALIZE_RULES
    for re_search, re_sub, replace in rules:
        # short-circuit: only substitute when the search pattern matches
        plural = re_search.search(singular) and re_sub.sub(replace, singular)
        if plural:
            return plural
456
def hide_password(uri):
    """Mask the password component of a connection URI with '******'.

    Accepts a single URI string, or a list/tuple of URIs, in which case
    a list of masked strings is returned.
    """
    if not isinstance(uri, (list, tuple)):
        return REGEX_NOPASSWD.sub('******', uri)
    masked = []
    for item in uri:
        masked.append(hide_password(item))
    return masked
461
def OR(a, b):
    """Combine two operands with the bitwise/overloaded ``|`` operator
    (used to OR DAL query expressions)."""
    combined = a | b
    return combined
464
def AND(a, b):
    """Combine two operands with the bitwise/overloaded ``&`` operator
    (used to AND DAL query expressions)."""
    combined = a & b
    return combined
467
468 -def IDENTITY(x): return x
469
def varquote_aux(name, quotestr='%s'):
    """Quote *name* using the *quotestr* template unless it is already a
    plain ``\\w+`` identifier."""
    if REGEX_W.match(name):
        return name
    return quotestr % name
472
def quote_keyword(a, keyword='timestamp'):
    """Double-quote occurrences of ``.<keyword>`` in the SQL string *a*
    (e.g. ``t.timestamp`` -> ``t."timestamp"``) so reserved words can be
    used as column names.

    BUGFIX: the pattern was previously the literal text
    ``'\\.keyword(?=\\w)'`` — the *keyword* argument was never
    interpolated, so nothing was ever quoted; the positive lookahead
    also required a word character AFTER the keyword, which excluded a
    keyword at the end of an identifier. The pattern is now built from
    the argument and uses a negative lookahead so only the whole word
    matches.
    """
    regex = re.compile('\\.%s(?!\\w)' % re.escape(keyword))
    a = regex.sub('."%s"' % keyword, a)
    return a
if 'google' in DRIVERS:

    is_jdbc = False

    class GAEDecimalProperty(gae.Property):
        """
        GAE decimal implementation: stores decimal.Decimal values as
        strings in the datastore, quantized to a fixed scale.
        """
        data_type = decimal.Decimal

        def __init__(self, precision, scale, **kwargs):
            # BUGFIX: was super(...).__init__(self, **kwargs), which
            # passed the instance twice (the extra ``self`` became
            # gae.Property's first positional argument).
            super(GAEDecimalProperty, self).__init__(**kwargs)
            # quantization template with ``scale`` decimal places;
            # ``precision`` mirrors SQL DECIMAL(precision, scale) but is
            # not enforced here
            d = '1.'
            for x in range(scale):
                d += '0'
            self.round = decimal.Decimal(d)

        def get_value_for_datastore(self, model_instance):
            """Serialize the Decimal to a string ('' / None -> None)."""
            value = super(GAEDecimalProperty, self)\
                .get_value_for_datastore(model_instance)
            if value is None or value == '':
                return None
            else:
                return str(value)

        def make_value_from_datastore(self, value):
            """Rebuild a Decimal from the stored string, quantized to
            the declared scale ('' / None -> None)."""
            if value is None or value == '':
                return None
            else:
                return decimal.Decimal(value).quantize(self.round)

        def validate(self, value):
            """Accept None, Decimal, or a string parseable as Decimal;
            raise gae.BadValueError otherwise."""
            value = super(GAEDecimalProperty, self).validate(value)
            if value is None or isinstance(value, decimal.Decimal):
                return value
            elif isinstance(value, basestring):
                return decimal.Decimal(value)
            raise gae.BadValueError("Property %s must be a Decimal or string."\
                                    % self.name)
###################################################################################
# class that handles connection pooling (all adapters are derived from this one)
###################################################################################

class ConnectionPool(object):
    """Mixin providing per-URI connection pooling plus commit/rollback
    and close helpers; every database adapter derives from it."""

    # class-level registry shared by all adapters:
    # maps connection URI -> list of idle pooled connections
    POOLS = {}
    # when True, a pooled connection is probed ('SELECT 1;') before reuse
    check_active_connection = True

    @staticmethod
528 - def set_folder(folder):
530 531 # ## this allows gluon to commit/rollback all dbs in this thread 532
    def close(self,action='commit',really=True):
        """Finish work on this adapter's connection.

        action: name of a method on self ('commit'/'rollback') or a
            callable invoked with the adapter; falsy skips the step.
        really: close the underlying connection; forced to False when
            the connection is recycled into the pool instead.
        """
        if action:
            if callable(action):
                action(self)
            else:
                getattr(self, action)()
        # ## if you want pools, recycle this connection
        if self.pool_size:
            GLOBAL_LOCKER.acquire()
            pool = ConnectionPool.POOLS[self.uri]
            if len(pool) < self.pool_size:
                pool.append(self.connection)
                really = False  # pooled: keep the socket open for reuse
            GLOBAL_LOCKER.release()
        if really:
            self.close_connection()
        # in either case the adapter no longer owns a connection
        self.connection = None
550 551 @staticmethod
552 - def close_all_instances(action):
553 """ to close cleanly databases in a multithreaded environment """ 554 dbs = getattr(THREAD_LOCAL,'db_instances',{}).items() 555 for db_uid, db_group in dbs: 556 for db in db_group: 557 if hasattr(db,'_adapter'): 558 db._adapter.close(action) 559 getattr(THREAD_LOCAL,'db_instances',{}).clear() 560 getattr(THREAD_LOCAL,'db_instances_zombie',{}).clear() 561 if callable(action): 562 action(None) 563 return
564
    def find_or_make_work_folder(self):
        """ this actually does not make the folder. it has to be there

        Sets self.folder from the thread-local 'folder' attribute,
        relativizing it for UseDatabaseStoredFile adapters when it lies
        under the current working directory.
        """
        self.folder = getattr(THREAD_LOCAL,'folder','')

        if (os.path.isabs(self.folder) and
            isinstance(self, UseDatabaseStoredFile) and
            self.folder.startswith(os.getcwd())):
            self.folder = os.path.relpath(self.folder, os.getcwd())

        # Creating the folder if it does not exist
        # NOTE(review): the 'if False and' deliberately disables folder
        # creation (see the docstring) — do not remove the False guard
        # without confirming the intent.
        if False and self.folder and not exists(self.folder):
            os.mkdir(self.folder)
577
578 - def after_connection_hook(self):
579 """hook for the after_connection parameter""" 580 if callable(self._after_connection): 581 self._after_connection(self) 582 self.after_connection()
583
584 - def after_connection(self):
585 """ this it is supposed to be overloaded by adapters""" 586 pass
587
    def reconnect(self, f=None, cursor=True):
        """
        this function defines: self.connection and self.cursor
        (iff cursor is True)
        if self.pool_size>0 it will try pull the connection from the pool
        if the connection is not active (closed by db server) it will loop
        if not self.pool_size or no active connections in pool makes a new one
        """
        # already connected: nothing to do
        if getattr(self,'connection', None) != None:
            return
        if f is None:
            f = self.connector

        # if not hasattr(self, "driver") or self.driver is None:
        #     LOGGER.debug("Skipping connection since there's no driver")
        #     return

        if not self.pool_size:
            # pooling disabled: always open a fresh connection
            self.connection = f()
            self.cursor = cursor and self.connection.cursor()
        else:
            uri = self.uri
            POOLS = ConnectionPool.POOLS
            while True:
                GLOBAL_LOCKER.acquire()
                if not uri in POOLS:
                    POOLS[uri] = []
                if POOLS[uri]:
                    # try to reuse an idle pooled connection
                    self.connection = POOLS[uri].pop()
                    GLOBAL_LOCKER.release()
                    self.cursor = cursor and self.connection.cursor()
                    try:
                        if self.cursor and self.check_active_connection:
                            # probe: the server may have dropped it
                            self.execute('SELECT 1;')
                        break
                    except:
                        # dead connection: loop and try the next one
                        pass
                else:
                    # pool empty: make a brand-new connection
                    GLOBAL_LOCKER.release()
                    self.connection = f()
                    self.cursor = cursor and self.connection.cursor()
                    break
        self.after_connection_hook()
631
###################################################################################
# this is a generic adapter that does nothing; all others are derived from this one
###################################################################################

class BaseAdapter(ConnectionPool):
    """Generic no-op adapter; concrete adapters override the class
    attributes, the SQL type map and the relevant methods."""

    native_json = False                 # backend has a native JSON type?
    driver = None                       # DB-API module bound by find_driver()
    driver_name = None                  # name of the bound driver module
    drivers = () # list of drivers from which to pick
    connection = None                   # live DB-API connection
    commit_on_alter_table = False
    support_distributed_transaction = False
    uploads_in_blob = False
    can_select_for_update = True
    dbpath = None
    folder = None                       # working folder for migration files

    # SQL literals used to render boolean values
    TRUE = 'T'
    FALSE = 'F'
    T_SEP = ' '                         # date/time separator — confirm usage
    QUOTE_TEMPLATE = '"%s"'             # identifier quoting template

    # DAL field type -> backend SQL column type; the %(...)s slots are
    # filled per field (length, foreign_key, on_delete_action, ...)
    types = {
        'boolean': 'CHAR(1)',
        'string': 'CHAR(%(length)s)',
        'text': 'TEXT',
        'json': 'TEXT',
        'password': 'CHAR(%(length)s)',
        'blob': 'BLOB',
        'upload': 'CHAR(%(length)s)',
        'integer': 'INTEGER',
        'bigint': 'INTEGER',
        'float':'DOUBLE',
        'double': 'DOUBLE',
        'decimal': 'DOUBLE',
        'date': 'DATE',
        'time': 'TIME',
        'datetime': 'TIMESTAMP',
        'id': 'INTEGER PRIMARY KEY AUTOINCREMENT',
        'reference': 'INTEGER REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'TEXT',
        'list:string': 'TEXT',
        'list:reference': 'TEXT',
        # the two below are only used when DAL(...bigint_id=True) and replace 'id','reference'
        'big-id': 'BIGINT PRIMARY KEY AUTOINCREMENT',
        'big-reference': 'BIGINT REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        }
681 - def isOperationalError(self,exception):
682 if not hasattr(self.driver, "OperationalError"): 683 return None 684 return isinstance(exception, self.driver.OperationalError)
685
686 - def isProgrammingError(self,exception):
687 if not hasattr(self.driver, "ProgrammingError"): 688 return None 689 return isinstance(exception, self.driver.ProgrammingError)
690
    def id_query(self, table):
        """Return a Query matching every record of *table*.

        NOTE: the ``!= None`` comparisons are intentional — DAL fields
        overload the comparison operators to build Query objects (here,
        an SQL "IS NOT NULL" test), so ``is not None`` must NOT be
        substituted.
        """
        pkeys = getattr(table,'_primarykey',None)
        if pkeys:
            # keyed table: use its first primary-key field
            return table[pkeys[0]] != None
        else:
            return table._id != None
697
698 - def adapt(self, obj):
699 return "'%s'" % obj.replace("'", "''")
700
701 - def smart_adapt(self, obj):
702 if isinstance(obj,(int,float)): 703 return str(obj) 704 return self.adapt(str(obj))
705
706 - def file_exists(self, filename):
707 """ 708 to be used ONLY for files that on GAE may not be on filesystem 709 """ 710 return exists(filename)
711
712 - def file_open(self, filename, mode='rb', lock=True):
713 """ 714 to be used ONLY for files that on GAE may not be on filesystem 715 """ 716 if have_portalocker and lock: 717 fileobj = portalocker.LockedFile(filename,mode) 718 else: 719 fileobj = open(filename,mode) 720 return fileobj
721
722 - def file_close(self, fileobj):
723 """ 724 to be used ONLY for files that on GAE may not be on filesystem 725 """ 726 if fileobj: 727 fileobj.close()
728
729 - def file_delete(self, filename):
730 os.unlink(filename)
731
732 - def find_driver(self,adapter_args,uri=None):
733 self.adapter_args = adapter_args 734 if getattr(self,'driver',None) != None: 735 return 736 drivers_available = [driver for driver in self.drivers 737 if driver in globals()] 738 if uri: 739 items = uri.split('://',1)[0].split(':') 740 request_driver = items[1] if len(items)>1 else None 741 else: 742 request_driver = None 743 request_driver = request_driver or adapter_args.get('driver') 744 if request_driver: 745 if request_driver in drivers_available: 746 self.driver_name = request_driver 747 self.driver = globals().get(request_driver) 748 else: 749 raise RuntimeError("driver %s not available" % request_driver) 750 elif drivers_available: 751 self.driver_name = drivers_available[0] 752 self.driver = globals().get(self.driver_name) 753 else: 754 raise RuntimeError("no driver available %s" % str(self.drivers))
755
    def log(self, message, table=None):
        """ Logs migrations

        It will not log changes if logfile is not specified. Defaults
        to sql.log
        """

        isabs = None
        logfilename = self.adapter_args.get('logfile','sql.log')
        writelog = bool(logfilename)
        if writelog:
            isabs = os.path.isabs(logfilename)

        # only log when attached to a migrating table and a work folder
        if table and table._dbt and writelog and self.folder:
            if isabs:
                table._loggername = logfilename
            else:
                # relative log file lives inside the adapter's folder
                table._loggername = pjoin(self.folder, logfilename)
            logfile = self.file_open(table._loggername, 'a')
            logfile.write(message)
            self.file_close(logfile)
777 778
779 - def __init__(self, db,uri,pool_size=0, folder=None, db_codec='UTF-8', 780 credential_decoder=IDENTITY, driver_args={}, 781 adapter_args={},do_connect=True, after_connection=None):
782 self.db = db 783 self.dbengine = "None" 784 self.uri = uri 785 self.pool_size = pool_size 786 self.folder = folder 787 self.db_codec = db_codec 788 self._after_connection = after_connection 789 class Dummy(object): 790 lastrowid = 1 791 def __getattr__(self, value): 792 return lambda *a, **b: []
793 self.connection = Dummy() 794 self.cursor = Dummy() 795
796 - def sequence_name(self,tablename):
797 return '%s_sequence' % tablename
798
799 - def trigger_name(self,tablename):
800 return '%s_sequence' % tablename
801
    def varquote(self, name):
        """Quote an identifier; the base adapter performs no quoting
        (adapters that need it override this)."""
        return name
804
    def create_table(self, table,
                     migrate=True,
                     fake_migrate=False,
                     polymodel=None):
        """Build (and, when migrating, execute) the CREATE TABLE
        statement for *table*; returns the SQL query string.

        Also maintains the per-table migration metadata file
        (table._dbt), and triggers migrate_table() when the stored field
        definitions differ from the current ones.
        """
        db = table._db
        fields = []
        # PostGIS geo fields are added after the table has been created
        postcreation_fields = []
        # field metadata used for migrations (sql_fields) vs for table
        # creation (sql_fields_aux) — see the caveat comment below
        sql_fields = {}
        sql_fields_aux = {}
        TFK = {}  # table-level foreign keys: rtablename -> {rfield: field}
        tablename = table._tablename
        sortable = 0
        types = self.types
        for field in table:
            sortable += 1
            field_name = field.name
            field_type = field.type
            if isinstance(field_type,SQLCustomType):
                ftype = field_type.native or field_type.type
            elif field_type.startswith('reference'):
                referenced = field_type[10:].strip()
                if referenced == '.':
                    # 'reference .' means a self-reference
                    referenced = tablename
                constraint_name = self.constraint_name(tablename, field_name)
                if not '.' in referenced \
                        and referenced != tablename \
                        and hasattr(table,'_primarykey'):
                    ftype = types['integer']
                else:
                    if hasattr(table,'_primarykey'):
                        rtablename,rfieldname = referenced.split('.')
                        rtable = db[rtablename]
                        rfield = rtable[rfieldname]
                        # must be PK reference or unique
                        if rfieldname in rtable._primarykey or \
                                rfield.unique:
                            ftype = types[rfield.type[:9]] % \
                                dict(length=rfield.length)
                            # multicolumn primary key reference?
                            if not rfield.unique and len(rtable._primarykey)>1:
                                # then it has to be a table level FK
                                if rtablename not in TFK:
                                    TFK[rtablename] = {}
                                TFK[rtablename][rfieldname] = field_name
                            else:
                                ftype = ftype + \
                                    types['reference FK'] % dict(
                                        constraint_name = constraint_name, # should be quoted
                                        foreign_key = '%s (%s)' % (rtablename,
                                                                   rfieldname),
                                        table_name = tablename,
                                        field_name = field_name,
                                        on_delete_action=field.ondelete)
                    else:
                        # make a guess here for circular references
                        if referenced in db:
                            id_fieldname = db[referenced]._id.name
                        elif referenced == tablename:
                            id_fieldname = table._id.name
                        else: #make a guess
                            id_fieldname = 'id'
                        ftype = types[field_type[:9]] % dict(
                            index_name = field_name+'__idx',
                            field_name = field_name,
                            constraint_name = constraint_name,
                            foreign_key = '%s (%s)' % (referenced,
                                                       id_fieldname),
                            on_delete_action=field.ondelete)
            elif field_type.startswith('list:reference'):
                ftype = types[field_type[:14]]
            elif field_type.startswith('decimal'):
                precision, scale = map(int,field_type[8:-1].split(','))
                ftype = types[field_type[:7]] % \
                    dict(precision=precision,scale=scale)
            elif field_type.startswith('geo'):
                if not hasattr(self,'srid'):
                    raise RuntimeError('Adapter does not support geometry')
                srid = self.srid
                geotype, parms = field_type[:-1].split('(')
                if not geotype in types:
                    raise SyntaxError(
                        'Field: unknown field type: %s for %s' \
                        % (field_type, field_name))
                ftype = types[geotype]
                if self.dbengine == 'postgres' and geotype == 'geometry':
                    # parameters: schema, srid, dimension
                    dimension = 2 # GIS.dimension ???
                    parms = parms.split(',')
                    if len(parms) == 3:
                        schema, srid, dimension = parms
                    elif len(parms) == 2:
                        schema, srid = parms
                    else:
                        schema = parms[0]
                    ftype = "SELECT AddGeometryColumn ('%%(schema)s', '%%(tablename)s', '%%(fieldname)s', %%(srid)s, '%s', %%(dimension)s);" % types[geotype]
                    ftype = ftype % dict(schema=schema,
                                         tablename=tablename,
                                         fieldname=field_name, srid=srid,
                                         dimension=dimension)
                    postcreation_fields.append(ftype)
            elif not field_type in types:
                raise SyntaxError('Field: unknown field type: %s for %s' % \
                    (field_type, field_name))
            else:
                ftype = types[field_type]\
                    % dict(length=field.length)
            if not field_type.startswith('id') and \
                    not field_type.startswith('reference'):
                if field.notnull:
                    ftype += ' NOT NULL'
                else:
                    ftype += self.ALLOW_NULL()
                if field.unique:
                    ftype += ' UNIQUE'
                if field.custom_qualifier:
                    ftype += ' %s' % field.custom_qualifier

            # add to list of fields
            sql_fields[field_name] = dict(
                length=field.length,
                unique=field.unique,
                notnull=field.notnull,
                sortable=sortable,
                type=str(field_type),
                sql=ftype)

            if field.notnull and not field.default is None:
                # Caveat: sql_fields and sql_fields_aux
                # differ for default values.
                # sql_fields is used to trigger migrations and sql_fields_aux
                # is used for create tables.
                # The reason is that we do not want to trigger
                # a migration simply because a default value changes.
                not_null = self.NOT_NULL(field.default, field_type)
                ftype = ftype.replace('NOT NULL', not_null)
            sql_fields_aux[field_name] = dict(sql=ftype)
            # Postgres - PostGIS:
            # geometry fields are added after the table has been created, not now
            if not (self.dbengine == 'postgres' and \
                    field_type.startswith('geom')):
                fields.append('%s %s' % (field_name, ftype))
        other = ';'

        # backend-specific extensions to fields
        if self.dbengine == 'mysql':
            if not hasattr(table, "_primarykey"):
                fields.append('PRIMARY KEY(%s)' % table._id.name)
            other = ' ENGINE=InnoDB CHARACTER SET utf8;'

        fields = ',\n    '.join(fields)
        # append any table-level (multicolumn) foreign keys collected above
        for rtablename in TFK:
            rfields = TFK[rtablename]
            pkeys = db[rtablename]._primarykey
            fkeys = [ rfields[k] for k in pkeys ]
            fields = fields + ',\n    ' + \
                types['reference TFK'] % dict(
                    table_name = tablename,
                    field_name=', '.join(fkeys),
                    foreign_table = rtablename,
                    foreign_key = ', '.join(pkeys),
                    on_delete_action = field.ondelete)

        if getattr(table,'_primarykey',None):
            query = "CREATE TABLE %s(\n    %s,\n    %s) %s" % \
                (tablename, fields,
                 self.PRIMARY_KEY(', '.join(table._primarykey)),other)
        else:
            query = "CREATE TABLE %s(\n    %s\n)%s" % \
                (tablename, fields, other)

        # migration metadata lives next to the sqlite db file, otherwise
        # in the adapter's work folder
        if self.uri.startswith('sqlite:///') \
                or self.uri.startswith('spatialite:///'):
            path_encoding = sys.getfilesystemencoding() \
                or locale.getdefaultlocale()[1] or 'utf8'
            dbpath = self.uri[9:self.uri.rfind('/')]\
                .decode('utf8').encode(path_encoding)
        else:
            dbpath = self.folder

        if not migrate:
            return query
        elif self.uri.startswith('sqlite:memory')\
                or self.uri.startswith('spatialite:memory'):
            # in-memory databases cannot persist migration metadata
            table._dbt = None
        elif isinstance(migrate, str):
            table._dbt = pjoin(dbpath, migrate)
        else:
            table._dbt = pjoin(
                dbpath, '%s_%s.table' % (table._db._uri_hash, tablename))

        if not table._dbt or not self.file_exists(table._dbt):
            # first creation (or no metadata): run/log the CREATE TABLE
            if table._dbt:
                self.log('timestamp: %s\n%s\n'
                         % (datetime.datetime.today().isoformat(),
                            query), table)
            if not fake_migrate:
                self.create_sequence_and_triggers(query,table)
                table._db.commit()
                # Postgres geom fields are added now,
                # after the table has been created
                for query in postcreation_fields:
                    self.execute(query)
                    table._db.commit()
            if table._dbt:
                tfile = self.file_open(table._dbt, 'w')
                pickle.dump(sql_fields, tfile)
                self.file_close(tfile)
                if fake_migrate:
                    self.log('faked!\n', table)
                else:
                    self.log('success!\n', table)
        else:
            # metadata exists: compare against it and migrate on change
            tfile = self.file_open(table._dbt, 'r')
            try:
                sql_fields_old = pickle.load(tfile)
            except EOFError:
                self.file_close(tfile)
                raise RuntimeError('File %s appears corrupted' % table._dbt)
            self.file_close(tfile)
            if sql_fields != sql_fields_old:
                self.migrate_table(table,
                                   sql_fields, sql_fields_old,
                                   sql_fields_aux, None,
                                   fake_migrate=fake_migrate)
        return query
1031
1032 - def migrate_table( 1033 self, 1034 table, 1035 sql_fields, 1036 sql_fields_old, 1037 sql_fields_aux, 1038 logfile, 1039 fake_migrate=False, 1040 ):
1041 1042 # logfile is deprecated (moved to adapter.log method) 1043 db = table._db 1044 db._migrated.append(table._tablename) 1045 tablename = table._tablename 1046 def fix(item): 1047 k,v=item 1048 if not isinstance(v,dict): 1049 v=dict(type='unknown',sql=v) 1050 return k.lower(),v
1051 # make sure all field names are lower case to avoid 1052 # migrations because of case cahnge 1053 sql_fields = dict(map(fix,sql_fields.iteritems())) 1054 sql_fields_old = dict(map(fix,sql_fields_old.iteritems())) 1055 sql_fields_aux = dict(map(fix,sql_fields_aux.iteritems())) 1056 if db._debug: 1057 logging.debug('migrating %s to %s' % (sql_fields_old,sql_fields)) 1058 1059 keys = sql_fields.keys() 1060 for key in sql_fields_old: 1061 if not key in keys: 1062 keys.append(key) 1063 new_add = self.concat_add(tablename) 1064 1065 metadata_change = False 1066 sql_fields_current = copy.copy(sql_fields_old) 1067 for key in keys: 1068 query = None 1069 if not key in sql_fields_old: 1070 sql_fields_current[key] = sql_fields[key] 1071 if self.dbengine in ('postgres',) and \ 1072 sql_fields[key]['type'].startswith('geometry'): 1073 # 'sql' == ftype in sql 1074 query = [ sql_fields[key]['sql'] ] 1075 else: 1076 query = ['ALTER TABLE %s ADD %s %s;' % \ 1077 (tablename, key, 1078 sql_fields_aux[key]['sql'].replace(', ', new_add))] 1079 metadata_change = True 1080 elif self.dbengine in ('sqlite', 'spatialite'): 1081 if key in sql_fields: 1082 sql_fields_current[key] = sql_fields[key] 1083 metadata_change = True 1084 elif not key in sql_fields: 1085 del sql_fields_current[key] 1086 ftype = sql_fields_old[key]['type'] 1087 if (self.dbengine in ('postgres',) and 1088 ftype.startswith('geometry')): 1089 geotype, parms = ftype[:-1].split('(') 1090 schema = parms.split(',')[0] 1091 query = [ "SELECT DropGeometryColumn ('%(schema)s', "+ 1092 "'%(table)s', '%(field)s');" % 1093 dict(schema=schema, table=tablename, field=key,) ] 1094 elif self.dbengine in ('firebird',): 1095 query = ['ALTER TABLE %s DROP %s;' % (tablename, key)] 1096 else: 1097 query = ['ALTER TABLE %s DROP COLUMN %s;' % 1098 (tablename, key)] 1099 metadata_change = True 1100 elif sql_fields[key]['sql'] != sql_fields_old[key]['sql'] \ 1101 and not (key in table.fields and 1102 isinstance(table[key].type, 
SQLCustomType)) \ 1103 and not sql_fields[key]['type'].startswith('reference')\ 1104 and not sql_fields[key]['type'].startswith('double')\ 1105 and not sql_fields[key]['type'].startswith('id'): 1106 sql_fields_current[key] = sql_fields[key] 1107 t = tablename 1108 tt = sql_fields_aux[key]['sql'].replace(', ', new_add) 1109 if self.dbengine in ('firebird',): 1110 drop_expr = 'ALTER TABLE %s DROP %s;' 1111 else: 1112 drop_expr = 'ALTER TABLE %s DROP COLUMN %s;' 1113 key_tmp = key + '__tmp' 1114 query = ['ALTER TABLE %s ADD %s %s;' % (t, key_tmp, tt), 1115 'UPDATE %s SET %s=%s;' % (t, key_tmp, key), 1116 drop_expr % (t, key), 1117 'ALTER TABLE %s ADD %s %s;' % (t, key, tt), 1118 'UPDATE %s SET %s=%s;' % (t, key, key_tmp), 1119 drop_expr % (t, key_tmp)] 1120 metadata_change = True 1121 elif sql_fields[key]['type'] != sql_fields_old[key]['type']: 1122 sql_fields_current[key] = sql_fields[key] 1123 metadata_change = True 1124 1125 if query: 1126 self.log('timestamp: %s\n' 1127 % datetime.datetime.today().isoformat(), table) 1128 db['_lastsql'] = '\n'.join(query) 1129 for sub_query in query: 1130 self.log(sub_query + '\n', table) 1131 if fake_migrate: 1132 if db._adapter.commit_on_alter_table: 1133 self.save_dbt(table,sql_fields_current) 1134 self.log('faked!\n', table) 1135 else: 1136 self.execute(sub_query) 1137 # Caveat: mysql, oracle and firebird 1138 # do not allow multiple alter table 1139 # in one transaction so we must commit 1140 # partial transactions and 1141 # update table._dbt after alter table. 1142 if db._adapter.commit_on_alter_table: 1143 db.commit() 1144 self.save_dbt(table,sql_fields_current) 1145 self.log('success!\n', table) 1146 1147 elif metadata_change: 1148 self.save_dbt(table,sql_fields_current) 1149 1150 if metadata_change and not (query and db._adapter.commit_on_alter_table): 1151 db.commit() 1152 self.save_dbt(table,sql_fields_current) 1153 self.log('success!\n', table) 1154
1155 - def save_dbt(self,table, sql_fields_current):
1156 tfile = self.file_open(table._dbt, 'w') 1157 pickle.dump(sql_fields_current, tfile) 1158 self.file_close(tfile)
1159
1160 - def LOWER(self, first):
1161 return 'LOWER(%s)' % self.expand(first)
1162
1163 - def UPPER(self, first):
1164 return 'UPPER(%s)' % self.expand(first)
1165
1166 - def COUNT(self, first, distinct=None):
1167 return ('COUNT(%s)' if not distinct else 'COUNT(DISTINCT %s)') \ 1168 % self.expand(first)
1169
1170 - def EXTRACT(self, first, what):
1171 return "EXTRACT(%s FROM %s)" % (what, self.expand(first))
1172
    def EPOCH(self, first):
        """Seconds since the Unix epoch, via EXTRACT(epoch FROM expr)."""
        return self.EXTRACT(first, 'epoch')
1175
1176 - def LENGTH(self, first):
1177 return "LENGTH(%s)" % self.expand(first)
1178
1179 - def AGGREGATE(self, first, what):
1180 return "%s(%s)" % (what, self.expand(first))
1181
    def JOIN(self):
        """Keyword used to render an inner join; adapters may override."""
        return 'JOIN'
1184
    def LEFT_JOIN(self):
        """Keyword used to render a left outer join; adapters may override."""
        return 'LEFT JOIN'
1187
    def RANDOM(self):
        """SQL expression for a random sort key; dialects override this."""
        return 'Random()'
1190
1191 - def NOT_NULL(self, default, field_type):
1192 return 'NOT NULL DEFAULT %s' % self.represent(default,field_type)
1193
1194 - def COALESCE(self, first, second):
1195 expressions = [self.expand(first)]+[self.expand(e) for e in second] 1196 return 'COALESCE(%s)' % ','.join(expressions)
1197
1198 - def COALESCE_ZERO(self, first):
1199 return 'COALESCE(%s,0)' % self.expand(first)
1200
    def RAW(self, first):
        """Return *first* untouched: a pass-through for raw SQL snippets."""
        return first
1203
    def ALLOW_NULL(self):
        """Clause appended to nullable columns; empty for ANSI dialects."""
        return ''
1206
1207 - def SUBSTRING(self, field, parameters):
1208 return 'SUBSTR(%s,%s,%s)' % (self.expand(field), parameters[0], parameters[1])
1209
1210 - def PRIMARY_KEY(self, key):
1211 return 'PRIMARY KEY(%s)' % key
1212
1213 - def _drop(self, table, mode):
1214 return ['DROP TABLE %s;' % table]
1215
    def drop(self, table, mode=''):
        """Drop *table* from the database and purge every in-memory reference.

        Executes the statements from _drop(), commits, removes the table
        from the DAL instance, and deletes its migration metadata file.
        """
        db = table._db
        queries = self._drop(table, mode)
        for query in queries:
            if table._dbt:
                self.log(query + '\n', table)
            self.execute(query)
        db.commit()
        # forget the table on the DAL side as well
        del db[table._tablename]
        del db.tables[db.tables.index(table._tablename)]
        db._remove_references_to(table)
        if table._dbt:
            # remove the .table file so a future define_table starts clean
            self.file_delete(table._dbt)
            self.log('success!\n', table)
1230
1231 - def _insert(self, table, fields):
1232 if fields: 1233 keys = ','.join(f.name for f, v in fields) 1234 values = ','.join(self.expand(v, f.type) for f, v in fields) 1235 return 'INSERT INTO %s(%s) VALUES (%s);' % (table, keys, values) 1236 else: 1237 return self._insert_empty(table)
1238
1239 - def _insert_empty(self, table):
1240 return 'INSERT INTO %s DEFAULT VALUES;' % table
1241
    def insert(self, table, fields):
        """Insert a record; return its id wrapped in a lazy Reference.

        On failure, delegates to table._on_insert_error when defined,
        otherwise re-raises. Keyed tables return a dict of their primary
        key values; non-integer backend ids are passed through as-is.
        """
        query = self._insert(table,fields)
        try:
            self.execute(query)
        except Exception:
            e = sys.exc_info()[1]
            if hasattr(table,'_on_insert_error'):
                return table._on_insert_error(table,fields,e)
            raise e
        if hasattr(table,'_primarykey'):
            # keyed table: report the inserted primary key values
            return dict([(k[0].name, k[1]) for k in fields \
                             if k[0].name in table._primarykey])
        id = self.lastrowid(table)
        if not isinstance(id,int):
            # opaque backend id (e.g. uuid): no Reference wrapping
            return id
        rid = Reference(id)
        (rid._table, rid._record) = (table, None)
        return rid
1260
1261 - def bulk_insert(self, table, items):
1262 return [self.insert(table,item) for item in items]
1263
1264 - def NOT(self, first):
1265 return '(NOT %s)' % self.expand(first)
1266
1267 - def AND(self, first, second):
1268 return '(%s AND %s)' % (self.expand(first), self.expand(second))
1269
1270 - def OR(self, first, second):
1271 return '(%s OR %s)' % (self.expand(first), self.expand(second))
1272
1273 - def BELONGS(self, first, second):
1274 if isinstance(second, str): 1275 return '(%s IN (%s))' % (self.expand(first), second[:-1]) 1276 if not second: 1277 return '(1=0)' 1278 items = ','.join(self.expand(item, first.type) for item in second) 1279 return '(%s IN (%s))' % (self.expand(first), items)
1280
    def REGEXP(self, first, second):
        """Regular expression match operator; dialect-specific, so
        concrete adapters must override this.

        Raises NotImplementedError in this base adapter.
        """
        raise NotImplementedError
1284
    def LIKE(self, first, second):
        """Case sensitive like operator; dialect-specific, so concrete
        adapters must override this.

        Raises NotImplementedError in this base adapter.
        """
        raise NotImplementedError
1288
1289 - def ILIKE(self, first, second):
1290 "case in-sensitive like operator" 1291 return '(%s LIKE %s)' % (self.expand(first), 1292 self.expand(second, 'string'))
1293
1294 - def STARTSWITH(self, first, second):
1295 return '(%s LIKE %s)' % (self.expand(first), 1296 self.expand(second+'%', 'string'))
1297
1298 - def ENDSWITH(self, first, second):
1299 return '(%s LIKE %s)' % (self.expand(first), 1300 self.expand('%'+second, 'string'))
1301
    def CONTAINS(self,first,second,case_sensitive=False):
        """Render a containment test as a LIKE pattern.

        string/text/json fields use '%needle%'; list: fields store
        values '|'-delimited, so the pattern becomes '%|needle|%'.
        Literal '%' (and '|' for list fields) in the needle are escaped;
        Expression needles are wrapped in SQL CONCAT/REPLACE so the
        escaping happens server-side. Uses LIKE when *case_sensitive*,
        otherwise ILIKE.
        """
        if first.type in ('string','text', 'json'):
            if isinstance(second,Expression):
                second = Expression(None,self.CONCAT('%',Expression(
                            None,self.REPLACE(second,('%','%%'))),'%'))
            else:
                second = '%'+str(second).replace('%','%%')+'%'
        elif first.type.startswith('list:'):
            if isinstance(second,Expression):
                second = Expression(None,self.CONCAT(
                        '%|',Expression(None,self.REPLACE(
                                Expression(None,self.REPLACE(
                                        second,('%','%%'))),('|','||'))),'|%'))
            else:
                second = '%|'+str(second).replace('%','%%')\
                    .replace('|','||')+'|%'
        op = case_sensitive and self.LIKE or self.ILIKE
        return op(first,second)
1320
1321 - def EQ(self, first, second=None):
1322 if second is None: 1323 return '(%s IS NULL)' % self.expand(first) 1324 return '(%s = %s)' % (self.expand(first), 1325 self.expand(second, first.type))
1326
1327 - def NE(self, first, second=None):
1328 if second is None: 1329 return '(%s IS NOT NULL)' % self.expand(first) 1330 return '(%s <> %s)' % (self.expand(first), 1331 self.expand(second, first.type))
1332
1333 - def LT(self,first,second=None):
1334 if second is None: 1335 raise RuntimeError("Cannot compare %s < None" % first) 1336 return '(%s < %s)' % (self.expand(first), 1337 self.expand(second,first.type))
1338
1339 - def LE(self,first,second=None):
1340 if second is None: 1341 raise RuntimeError("Cannot compare %s <= None" % first) 1342 return '(%s <= %s)' % (self.expand(first), 1343 self.expand(second,first.type))
1344
1345 - def GT(self,first,second=None):
1346 if second is None: 1347 raise RuntimeError("Cannot compare %s > None" % first) 1348 return '(%s > %s)' % (self.expand(first), 1349 self.expand(second,first.type))
1350
1351 - def GE(self,first,second=None):
1352 if second is None: 1353 raise RuntimeError("Cannot compare %s >= None" % first) 1354 return '(%s >= %s)' % (self.expand(first), 1355 self.expand(second,first.type))
1356
1357 - def is_numerical_type(self, ftype):
1358 return ftype in ('integer','boolean','double','bigint') or \ 1359 ftype.startswith('decimal')
1360
1361 - def REPLACE(self, first, (second, third)):
1362 return 'REPLACE(%s,%s,%s)' % (self.expand(first,'string'), 1363 self.expand(second,'string'), 1364 self.expand(third,'string'))
1365
1366 - def CONCAT(self, *items):
1367 return '(%s)' % ' || '.join(self.expand(x,'string') for x in items)
1368
1369 - def ADD(self, first, second):
1370 if self.is_numerical_type(first.type): 1371 return '(%s + %s)' % (self.expand(first), 1372 self.expand(second, first.type)) 1373 else: 1374 return self.CONCAT(first, second)
1375
1376 - def SUB(self, first, second):
1377 return '(%s - %s)' % (self.expand(first), 1378 self.expand(second, first.type))
1379
1380 - def MUL(self, first, second):
1381 return '(%s * %s)' % (self.expand(first), 1382 self.expand(second, first.type))
1383
1384 - def DIV(self, first, second):
1385 return '(%s / %s)' % (self.expand(first), 1386 self.expand(second, first.type))
1387
1388 - def MOD(self, first, second):
1389 return '(%s %% %s)' % (self.expand(first), 1390 self.expand(second, first.type))
1391
1392 - def AS(self, first, second):
1393 return '%s AS %s' % (self.expand(first), second)
1394
    def ON(self, first, second):
        """Render the ON clause of a join: '<table> ON <condition>'.

        Common filters registered for the joined table are folded into
        the condition before rendering.
        """
        if use_common_filters(second):
            second = self.common_filter(second,[first._tablename])
        return '%s ON %s' % (self.expand(first), self.expand(second))
1399
1400 - def INVERT(self, first):
1401 return '%s DESC' % self.expand(first)
1402
1403 - def COMMA(self, first, second):
1404 return '%s, %s' % (self.expand(first), self.expand(second))
1405
1406 - def CAST(self, first, second):
1407 return 'CAST(%s AS %s)' % (first, second)
1408
    def expand(self, expression, field_type=None):
        """Translate a DAL object (Field, Expression, Query) or a plain
        value into its SQL text representation.

        *field_type* gives the type context used to represent constants
        and to CAST non-string columns when a string context demands it.
        """
        if isinstance(expression, Field):
            out = '%s.%s' % (expression.table._tablename, expression.name)
            if field_type == 'string' and not expression.type in (
                'string','text','json','password'):
                # string context: cast the column to text
                out = self.CAST(out, self.types['text'])
            return out
        elif isinstance(expression, (Expression, Query)):
            first = expression.first
            second = expression.second
            op = expression.op
            optional_args = expression.optional_args or {}
            if not second is None:
                out = op(first, second, **optional_args)
            elif not first is None:
                out = op(first,**optional_args)
            elif isinstance(op, str):
                # raw SQL operator text: strip a trailing ';' and wrap
                if op.endswith(';'):
                    op=op[:-1]
                out = '(%s)' % op
            else:
                out = op()
            return out
        elif field_type:
            # plain constant with a known type
            return str(self.represent(expression,field_type))
        elif isinstance(expression,(list,tuple)):
            return ','.join(self.represent(item,field_type) \
                                for item in expression)
        elif isinstance(expression, bool):
            return '1' if expression else '0'
        else:
            return str(expression)
1441
    def table_alias(self,name):
        """Return the SQL name/alias of *name*, resolving strings through
        the DAL instance (aliased tables render with their alias)."""
        return str(name if isinstance(name,Table) else self.db[name])
1444
    def alias(self, table, alias):
        """
        Given a table object, makes a new table object
        with alias name.
        """
        other = copy.copy(table)
        # remember the original table name so SQL can render 'orig AS alias'
        other['_ot'] = other._ot or other._tablename
        other['ALL'] = SQLALL(other)
        other['_tablename'] = alias
        for fieldname in other.fields:
            # shallow-copy each field and rebind it to the aliased table
            other[fieldname] = copy.copy(other[fieldname])
            other[fieldname]._tablename = alias
            other[fieldname].tablename = alias
            other[fieldname].table = other
        # register the alias on the DAL instance so queries can use it
        table._db[alias] = other
        return other
1461
1462 - def _truncate(self, table, mode=''):
1463 tablename = table._tablename 1464 return ['TRUNCATE TABLE %s %s;' % (tablename, mode or '')]
1465
1466 - def truncate(self, table, mode= ' '):
1467 # Prepare functions "write_to_logfile" and "close_logfile" 1468 try: 1469 queries = table._db._adapter._truncate(table, mode) 1470 for query in queries: 1471 self.log(query + '\n', table) 1472 self.execute(query) 1473 table._db.commit() 1474 self.log('success!\n', table) 1475 finally: 1476 pass
1477
1478 - def _update(self, tablename, query, fields):
1479 if query: 1480 if use_common_filters(query): 1481 query = self.common_filter(query, [tablename]) 1482 sql_w = ' WHERE ' + self.expand(query) 1483 else: 1484 sql_w = '' 1485 sql_v = ','.join(['%s=%s' % (field.name, 1486 self.expand(value, field.type)) \ 1487 for (field, value) in fields]) 1488 tablename = "%s" % self.db[tablename] 1489 return 'UPDATE %s SET %s%s;' % (tablename, sql_v, sql_w)
1490
    def update(self, tablename, query, fields):
        """Execute the UPDATE built by _update; return the affected row
        count (None when the backend does not report one).

        On error, delegates to table._on_update_error when defined,
        otherwise re-raises.
        """
        sql = self._update(tablename, query, fields)
        try:
            self.execute(sql)
        except Exception:
            e = sys.exc_info()[1]
            table = self.db[tablename]
            if hasattr(table,'_on_update_error'):
                return table._on_update_error(table,query,fields,e)
            raise e
        try:
            return self.cursor.rowcount
        except:
            return None
1505
1506 - def _delete(self, tablename, query):
1507 if query: 1508 if use_common_filters(query): 1509 query = self.common_filter(query, [tablename]) 1510 sql_w = ' WHERE ' + self.expand(query) 1511 else: 1512 sql_w = '' 1513 return 'DELETE FROM %s%s;' % (tablename, sql_w)
1514
    def delete(self, tablename, query):
        """Execute the DELETE built by _delete; return the affected row
        count (None when unknown).

        SQLite/SpatiaLite do not enforce ON DELETE CASCADE here, so the
        ids to be deleted are captured first and rows referencing them
        with ondelete=='CASCADE' are removed manually afterwards.
        """
        sql = self._delete(tablename, query)
        ### special code to handle CASCADE in SQLite & SpatiaLite
        db = self.db
        table = db[tablename]
        if self.dbengine in ('sqlite', 'spatialite') and table._referenced_by:
            deleted = [x[table._id.name] for x in db(query).select(table._id)]
        ### end special code to handle CASCADE in SQLite & SpatiaLite
        self.execute(sql)
        try:
            counter = self.cursor.rowcount
        except:
            counter = None
        ### special code to handle CASCADE in SQLite & SpatiaLite
        if self.dbengine in ('sqlite', 'spatialite') and counter:
            for field in table._referenced_by:
                if field.type=='reference '+table._tablename \
                        and field.ondelete=='CASCADE':
                    db(field.belongs(deleted)).delete()
        ### end special code to handle CASCADE in SQLite & SpatiaLite
        return counter
1536
1537 - def get_table(self, query):
1538 tablenames = self.tables(query) 1539 if len(tablenames)==1: 1540 return tablenames[0] 1541 elif len(tablenames)<1: 1542 raise RuntimeError("No table selected") 1543 else: 1544 raise RuntimeError("Too many tables selected")
1545
    def expand_all(self, fields, tablenames):
        """Normalize a select field list: expand SQLALL into the table's
        fields, resolve 'table.field' strings, wrap other strings as raw
        Expressions, and default to all fields of *tablenames* when the
        list is empty."""
        db = self.db
        new_fields = []
        append = new_fields.append
        for item in fields:
            if isinstance(item,SQLALL):
                new_fields += item._table
            elif isinstance(item,str):
                if REGEX_TABLE_DOT_FIELD.match(item):
                    tablename,fieldname = item.split('.')
                    append(db[tablename][fieldname])
                else:
                    # raw SQL snippet: wrap so it is emitted verbatim
                    append(Expression(db,lambda item=item:item))
            else:
                append(item)
        # ## if no fields specified take them all from the requested tables
        if not new_fields:
            for table in tablenames:
                for field in db[table]:
                    append(field)
        return new_fields
1567
    def _select(self, query, fields, attributes):
        """Build the full SELECT statement for *query* and *fields*.

        Validates the select attributes, collects the referenced tables,
        folds in common filters, then assembles the DISTINCT/JOIN/WHERE/
        GROUP BY/HAVING/ORDER BY/LIMIT pieces and delegates the final
        concatenation to select_limitby().
        """
        tables = self.tables
        for key in set(attributes.keys())-SELECT_ARGS:
            raise SyntaxError('invalid select attribute: %s' % key)
        args_get = attributes.get
        tablenames = tables(query)
        tablenames_for_common_filters = tablenames
        for field in fields:
            if isinstance(field, basestring) \
                    and REGEX_TABLE_DOT_FIELD.match(field):
                tn,fn = field.split('.')
                field = self.db[tn][fn]
            for tablename in tables(field):
                if not tablename in tablenames:
                    tablenames.append(tablename)

        if len(tablenames) < 1:
            raise SyntaxError('Set: no tables selected')
        self._colnames = map(self.expand, fields)
        def geoexpand(field):
            # geometry columns are selected as their WKT representation
            if isinstance(field.type,str) and field.type.startswith('geometry'):
                field = field.st_astext()
            return self.expand(field)
        sql_f = ', '.join(map(geoexpand, fields))
        sql_o = ''
        sql_s = ''
        left = args_get('left', False)
        inner_join = args_get('join', False)
        distinct = args_get('distinct', False)
        groupby = args_get('groupby', False)
        orderby = args_get('orderby', False)
        having = args_get('having', False)
        limitby = args_get('limitby', False)
        orderby_on_limitby = args_get('orderby_on_limitby', True)
        for_update = args_get('for_update', False)
        if self.can_select_for_update is False and for_update is True:
            raise SyntaxError('invalid select attribute: for_update')
        if distinct is True:
            sql_s += 'DISTINCT'
        elif distinct:
            sql_s += 'DISTINCT ON (%s)' % distinct
        if inner_join:
            # split inner joins into plain tables and ON-expressions
            icommand = self.JOIN()
            if not isinstance(inner_join, (tuple, list)):
                inner_join = [inner_join]
            ijoint = [t._tablename for t in inner_join
                      if not isinstance(t,Expression)]
            ijoinon = [t for t in inner_join if isinstance(t, Expression)]
            itables_to_merge={} #issue 490
            [itables_to_merge.update(
                    dict.fromkeys(tables(t))) for t in ijoinon]
            ijoinont = [t.first._tablename for t in ijoinon]
            [itables_to_merge.pop(t) for t in ijoinont
             if t in itables_to_merge] #issue 490
            iimportant_tablenames = ijoint + ijoinont + itables_to_merge.keys()
            iexcluded = [t for t in tablenames
                         if not t in iimportant_tablenames]
        if left:
            # same split for left joins
            join = attributes['left']
            command = self.LEFT_JOIN()
            if not isinstance(join, (tuple, list)):
                join = [join]
            joint = [t._tablename for t in join
                     if not isinstance(t, Expression)]
            joinon = [t for t in join if isinstance(t, Expression)]
            #patch join+left patch (solves problem with ordering in left joins)
            tables_to_merge={}
            [tables_to_merge.update(
                    dict.fromkeys(tables(t))) for t in joinon]
            joinont = [t.first._tablename for t in joinon]
            [tables_to_merge.pop(t) for t in joinont if t in tables_to_merge]
            tablenames_for_common_filters = [t for t in tablenames
                                             if not t in joinont ]
            important_tablenames = joint + joinont + tables_to_merge.keys()
            excluded = [t for t in tablenames
                        if not t in important_tablenames ]
        else:
            excluded = tablenames

        if use_common_filters(query):
            query = self.common_filter(query,tablenames_for_common_filters)
        sql_w = ' WHERE ' + self.expand(query) if query else ''

        # assemble the FROM clause for each join combination
        if inner_join and not left:
            sql_t = ', '.join([self.table_alias(t) for t in iexcluded + \
                                   itables_to_merge.keys()])
            for t in ijoinon:
                sql_t += ' %s %s' % (icommand, t)
        elif not inner_join and left:
            sql_t = ', '.join([self.table_alias(t) for t in excluded + \
                                   tables_to_merge.keys()])
            if joint:
                sql_t += ' %s %s' % (command,
                                     ','.join([self.table_alias(t) for t in joint]))
            for t in joinon:
                sql_t += ' %s %s' % (command, t)
        elif inner_join and left:
            all_tables_in_query = set(important_tablenames + \
                                      iimportant_tablenames + \
                                      tablenames)
            tables_in_joinon = set(joinont + ijoinont)
            tables_not_in_joinon = \
                all_tables_in_query.difference(tables_in_joinon)
            sql_t = ','.join([self.table_alias(t) for t in tables_not_in_joinon])
            for t in ijoinon:
                sql_t += ' %s %s' % (icommand, t)
            if joint:
                sql_t += ' %s %s' % (command,
                                     ','.join([self.table_alias(t) for t in joint]))
            for t in joinon:
                sql_t += ' %s %s' % (command, t)
        else:
            sql_t = ', '.join(self.table_alias(t) for t in tablenames)
        if groupby:
            if isinstance(groupby, (list, tuple)):
                groupby = xorify(groupby)
            sql_o += ' GROUP BY %s' % self.expand(groupby)
            if having:
                sql_o += ' HAVING %s' % attributes['having']
        if orderby:
            if isinstance(orderby, (list, tuple)):
                orderby = xorify(orderby)
            if str(orderby) == '<random>':
                sql_o += ' ORDER BY %s' % self.RANDOM()
            else:
                sql_o += ' ORDER BY %s' % self.expand(orderby)
        if (limitby and not groupby and tablenames and orderby_on_limitby and not orderby):
            # a LIMIT without ORDER BY is non-deterministic: default to pk order
            sql_o += ' ORDER BY %s' % ', '.join(['%s.%s'%(t,x) for t in tablenames for x in (hasattr(self.db[t],'_primarykey') and self.db[t]._primarykey or [self.db[t]._id.name])])
        # oracle does not support limitby
        sql = self.select_limitby(sql_s, sql_f, sql_t, sql_w, sql_o, limitby)
        if for_update and self.can_select_for_update is True:
            sql = sql.rstrip(';') + ' FOR UPDATE;'
        return sql
1702 - def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
1703 if limitby: 1704 (lmin, lmax) = limitby 1705 sql_o += ' LIMIT %i OFFSET %i' % (lmax - lmin, lmin) 1706 return 'SELECT %s %s FROM %s%s%s;' % \ 1707 (sql_s, sql_f, sql_t, sql_w, sql_o)
1708
    def _fetchall(self):
        """Fetch all rows pending on the current cursor."""
        return self.cursor.fetchall()
1711
    def _select_aux(self,sql,fields,attributes):
        """Execute *sql* (optionally through the row cache), slice rows
        per limitby, and hand them to the row processor (self.parse by
        default)."""
        args_get = attributes.get
        cache = args_get('cache',None)
        if not cache:
            self.execute(sql)
            rows = self._fetchall()
        else:
            (cache_model, time_expire) = cache
            key = self.uri + '/' + sql + '/rows'
            # keep cache keys bounded in length
            if len(key)>200: key = hashlib_md5(key).hexdigest()
            def _select_aux2():
                self.execute(sql)
                return self._fetchall()
            rows = cache_model(key,_select_aux2,time_expire)
        if isinstance(rows,tuple):
            rows = list(rows)
        limitby = args_get('limitby', None) or (0,)
        # backends without native LIMIT support slice here
        rows = self.rowslice(rows,limitby[0],None)
        processor = args_get('processor',self.parse)
        cacheable = args_get('cacheable',False)
        return processor(rows,fields,self._colnames,cacheable=cacheable)
    def select(self, query, fields, attributes):
        """
        Always returns a Rows object, possibly empty.
        """
        sql = self._select(query, fields, attributes)
        cache = attributes.get('cache', None)
        if cache and attributes.get('cacheable',False):
            # cacheable result: cache the parsed Rows keyed by uri+sql
            del attributes['cache']
            (cache_model, time_expire) = cache
            key = self.uri + '/' + sql
            if len(key)>200: key = hashlib_md5(key).hexdigest()
            args = (sql,fields,attributes)
            return cache_model(
                key,
                lambda self=self,args=args:self._select_aux(*args),
                time_expire)
        else:
            return self._select_aux(sql,fields,attributes)
1752
1753 - def _count(self, query, distinct=None):
1754 tablenames = self.tables(query) 1755 if query: 1756 if use_common_filters(query): 1757 query = self.common_filter(query, tablenames) 1758 sql_w = ' WHERE ' + self.expand(query) 1759 else: 1760 sql_w = '' 1761 sql_t = ','.join(self.table_alias(t) for t in tablenames) 1762 if distinct: 1763 if isinstance(distinct,(list, tuple)): 1764 distinct = xorify(distinct) 1765 sql_d = self.expand(distinct) 1766 return 'SELECT count(DISTINCT %s) FROM %s%s;' % \ 1767 (sql_d, sql_t, sql_w) 1768 return 'SELECT count(*) FROM %s%s;' % (sql_t, sql_w)
1769
    def count(self, query, distinct=None):
        """Execute the count SQL for *query* and return the scalar result."""
        self.execute(self._count(query, distinct))
        return self.cursor.fetchone()[0]
1773
1774 - def tables(self, *queries):
1775 tables = set() 1776 for query in queries: 1777 if isinstance(query, Field): 1778 tables.add(query.tablename) 1779 elif isinstance(query, (Expression, Query)): 1780 if not query.first is None: 1781 tables = tables.union(self.tables(query.first)) 1782 if not query.second is None: 1783 tables = tables.union(self.tables(query.second)) 1784 return list(tables)
1785
    def commit(self):
        """Commit the current transaction, if a connection is open."""
        if self.connection:
            return self.connection.commit()
1789
    def rollback(self):
        """Roll back the current transaction, if a connection is open."""
        if self.connection:
            return self.connection.rollback()
1793
1794 - def close_connection(self):
1795 if self.connection: 1796 r = self.connection.close() 1797 self.connection = None 1798 return r
1799
    def distributed_transaction_begin(self, key):
        """Start a distributed transaction; no-op in the base adapter."""
        return
1802
    def prepare(self, key):
        """First phase of a two-phase commit on this connection."""
        if self.connection: self.connection.prepare()
1805
    def commit_prepared(self, key):
        """Second phase of a two-phase commit: commit the prepared transaction."""
        if self.connection: self.connection.commit()
1808
    def rollback_prepared(self, key):
        """Abort a prepared two-phase transaction."""
        if self.connection: self.connection.rollback()
1811
    def concat_add(self, tablename):
        """Separator used to chain multiple ADD clauses in one ALTER TABLE."""
        return ', ADD '
1814
1815 - def constraint_name(self, table, fieldname):
1816 return '%s_%s__constraint' % (table,fieldname)
1817
    def create_sequence_and_triggers(self, query, table, **args):
        """Run the CREATE TABLE; adapters needing sequences/triggers
        (e.g. Oracle, Firebird) override this to create them as well."""
        self.execute(query)
1820
    def log_execute(self, *a, **b):
        """Run a SQL command through the cursor, recording it in
        db._lastsql and timing it into db._timings.

        Returns None without executing when there is no connection.
        """
        if not self.connection: return None
        command = a[0]
        if hasattr(self,'filter_sql_command'):
            command = self.filter_sql_command(command)
        if self.db._debug:
            LOGGER.debug('SQL: %s' % command)
        self.db._lastsql = command
        t0 = time.time()
        ret = self.cursor.execute(command, *a[1:], **b)
        self.db._timings.append((command,time.time()-t0))
        # keep only the most recent TIMINGSSIZE timings
        del self.db._timings[:-TIMINGSSIZE]
        return ret
1834
    def execute(self, *a, **b):
        """Execute a SQL command; indirected through log_execute so every
        statement is logged and timed."""
        return self.log_execute(*a, **b)
1837
    def represent(self, obj, fieldtype):
        """Convert a Python value into its SQL literal for *fieldtype*.

        Handles callables, SQLCustomType encoders, list: bar-encoding,
        NULLs, booleans, numbers, references, date/time ISO formatting,
        base64 blobs, and json serialization, finishing with the
        adapter's quoting/escaping via self.adapt().
        """
        field_is_type = fieldtype.startswith
        if isinstance(obj, CALLABLETYPES):
            # lazy defaults: call to obtain the actual value
            obj = obj()
        if isinstance(fieldtype, SQLCustomType):
            value = fieldtype.encoder(obj)
            if fieldtype.type in ('string','text', 'json'):
                return self.adapt(value)
            return value
        if isinstance(obj, (Expression, Field)):
            return str(obj)
        if field_is_type('list:'):
            if not obj:
                obj = []
            elif not isinstance(obj, (list, tuple)):
                obj = [obj]
            if field_is_type('list:string'):
                obj = map(str,obj)
            else:
                obj = map(int,[o for o in obj if o != ''])
        # we don't want to bar_encode json objects
        if isinstance(obj, (list, tuple)) and (not fieldtype == "json"):
            obj = bar_encode(obj)
        if obj is None:
            return 'NULL'
        if obj == '' and not fieldtype[:2] in ['st', 'te', 'js', 'pa', 'up']:
            # empty string is NULL except for string-like field types
            return 'NULL'
        r = self.represent_exceptions(obj, fieldtype)
        if not r is None:
            # a backend-specific representation took precedence
            return r
        if fieldtype == 'boolean':
            if obj and not str(obj)[:1].upper() in '0F':
                return self.smart_adapt(self.TRUE)
            else:
                return self.smart_adapt(self.FALSE)
        if fieldtype == 'id' or fieldtype == 'integer':
            return str(long(obj))
        if field_is_type('decimal'):
            return str(obj)
        elif field_is_type('reference'): # reference
            if fieldtype.find('.')>0:
                return repr(obj)
            elif isinstance(obj, (Row, Reference)):
                return str(obj['id'])
            return str(long(obj))
        elif fieldtype == 'double':
            return repr(float(obj))
        if isinstance(obj, unicode):
            obj = obj.encode(self.db_codec)
        if fieldtype == 'blob':
            obj = base64.b64encode(str(obj))
        elif fieldtype == 'date':
            if isinstance(obj, (datetime.date, datetime.datetime)):
                obj = obj.isoformat()[:10]
            else:
                obj = str(obj)
        elif fieldtype == 'datetime':
            if isinstance(obj, datetime.datetime):
                obj = obj.isoformat(self.T_SEP)[:19]
            elif isinstance(obj, datetime.date):
                obj = obj.isoformat()[:10]+self.T_SEP+'00:00:00'
            else:
                obj = str(obj)
        elif fieldtype == 'time':
            if isinstance(obj, datetime.time):
                obj = obj.isoformat()[:10]
            else:
                obj = str(obj)
        elif fieldtype == 'json':
            if not self.native_json:
                if have_serializers:
                    obj = serializers.json(obj)
                elif simplejson:
                    obj = simplejson.dumps(obj)
                else:
                    raise RuntimeError("missing simplejson")
        if not isinstance(obj,bytes):
            obj = bytes(obj)
        try:
            # ensure the bytes are valid in the target codec
            obj.decode(self.db_codec)
        except:
            obj = obj.decode('latin1').encode(self.db_codec)
        return self.adapt(obj)
1921
    def represent_exceptions(self, obj, fieldtype):
        """Hook for backend-specific literal rendering; returning None
        means 'no special case', letting represent() continue."""
        return None
1924
    def lastrowid(self, table):
        """Id of the last inserted row; None here, overridden per backend."""
        return None
1927
    def rowslice(self, rows, minimum=0, maximum=None):
        """
        By default this function does nothing;
        overload when db does not do slicing.
        """
        return rows
1934
    def parse_value(self, value, field_type, blob_decode=True):
        """Convert a raw DB value to its Python type via the parsemap
        dispatch; string-like types pass through, blobs are decoded
        unless *blob_decode* is False."""
        if field_type != 'blob' and isinstance(value, str):
            try:
                value = value.decode(self.db._db_codec)
            except Exception:
                pass
        if isinstance(value, unicode):
            value = value.encode('utf-8')
        if isinstance(field_type, SQLCustomType):
            value = field_type.decoder(value)
        if not isinstance(field_type, str) or value is None:
            return value
        elif field_type in ('string', 'text', 'password', 'upload', 'dict'):
            return value
        elif field_type.startswith('geo'):
            return value
        elif field_type == 'blob' and not blob_decode:
            return value
        else:
            # dispatch on the base type name (e.g. 'decimal' of 'decimal(10,2)')
            key = REGEX_TYPE.match(field_type).group(0)
            return self.parsemap[key](value,field_type)
1956
def parse_reference(self, value, field_type):
    # field_type is 'reference tablename[.fieldname]'; strip the
    # 'reference ' prefix (10 chars) to get the referee.
    referee = field_type[10:].strip()
    if not '.' in referee:
        # plain table reference: wrap the id in a lazy Reference
        # (record is fetched on first attribute access elsewhere)
        value = Reference(value)
        value._table, value._record = self.db[referee], None
    # 'table.field' style references are returned as the raw value
    return value
1963
def parse_boolean(self, value, field_type):
    """Map a stored boolean marker back to a Python bool.

    True when the raw value equals the adapter's TRUE literal or its
    string form starts with 't'/'T'.
    """
    if value == self.TRUE:
        return True
    return str(value)[:1].lower() == 't'
1966
def parse_date(self, value, field_type):
    """Coerce a stored value to datetime.date.

    Accepts a date, a datetime (time part dropped) or an ISO
    'YYYY-MM-DD' string prefix.
    """
    if isinstance(value, datetime.datetime):
        return value.date()
    if isinstance(value, datetime.date):
        return value
    year, month, day = (int(part) for part in
                        str(value)[:10].strip().split('-'))
    return datetime.date(year, month, day)
1974
def parse_time(self, value, field_type):
    """Coerce a stored value to datetime.time.

    Accepts an existing datetime.time or an 'HH:MM:SS' / 'HH:MM' /
    'HH' string; missing components default to 0.

    Fixed: the original did len(map(...)), which fails on Python 3
    where map is lazy — wrap in list() (identical on Python 2).  The
    padding loop also generalizes the old two-case unpack, so a bare
    'HH' value no longer raises ValueError.
    """
    if not isinstance(value, datetime.time):
        time_items = list(map(int, str(value)[:8].strip().split(':')[:3]))
        while len(time_items) < 3:
            time_items.append(0)
        (h, mi, s) = time_items
        value = datetime.time(h, mi, s)
    return value
1984
def parse_datetime(self, value, field_type):
    """Coerce a stored value to a naive datetime.datetime.

    Understands 'YYYY-MM-DD[ T]HH:MM:SS' with an optional trailing
    '+HH:MM' / '-HH:MM' UTC offset, which is folded into the returned
    naive datetime by simple addition.
    """
    if isinstance(value, datetime.datetime):
        return value
    text = str(value)
    date_text, time_text, tz_text = text[:10], text[11:19], text[19:]
    if '+' in tz_text:
        _, tz = tz_text.split('+')
        h, m = tz.split(':')
        offset = datetime.timedelta(seconds=3600 * int(h) + 60 * int(m))
    elif '-' in tz_text:
        _, tz = tz_text.split('-')
        h, m = tz.split(':')
        offset = -datetime.timedelta(seconds=3600 * int(h) + 60 * int(m))
    else:
        offset = None
    y, mo, d = [int(part) for part in date_text.split('-')]
    parts = time_text and time_text.split(':')[:3] or (0, 0, 0)
    parts = [int(part) for part in parts]
    while len(parts) < 3:
        parts.append(0)
    hh, mm, ss = parts
    value = datetime.datetime(y, mo, d, hh, mm, ss)
    if offset:
        value = value + offset
    return value
2008
def parse_blob(self, value, field_type):
    """Decode a base64-encoded blob column back to raw bytes."""
    encoded = str(value)
    return base64.b64decode(encoded)
2011
def parse_decimal(self, value, field_type):
    """Coerce a stored number to decimal.Decimal.

    `field_type` looks like 'decimal(precision,scale)'; the scale is
    used to round values coming from SQLite/SpatiaLite, which store
    them as floats.
    """
    scale = int(field_type[8:-1].split(',')[-1])
    if self.dbengine in ('sqlite', 'spatialite'):
        # SQLite has no decimal type: quantize the float first
        value = ('%%.%df' % scale) % value
    if isinstance(value, decimal.Decimal):
        return value
    return decimal.Decimal(str(value))
2019
def parse_list_integers(self, value, field_type):
    """Decode a bar-encoded '|1|2|' string into a list of ints.

    NoSQL backends store real lists, so no decoding is needed there.
    """
    if isinstance(self, NoSQLAdapter):
        return value
    return bar_decode_integer(value)
2024
def parse_list_references(self, value, field_type):
    """Decode a stored list of ids into Reference objects."""
    if not isinstance(self, NoSQLAdapter):
        value = bar_decode_integer(value)
    # field_type is 'list:reference <table>'; strip the 'list:' prefix
    referee_type = field_type[5:]
    return [self.parse_reference(item, referee_type) for item in value]
2029
def parse_list_strings(self, value, field_type):
    """Decode a bar-encoded '|a|b|' string into a list of strings.

    NoSQL backends store real lists, so no decoding is needed there.
    """
    if isinstance(self, NoSQLAdapter):
        return value
    return bar_decode_string(value)
2034
def parse_id(self, value, field_type):
    # ids are coerced to long.  NOTE(review): `long` is the Python 2
    # builtin — presumably aliased to int elsewhere for Python 3; verify.
    return long(value)
2037
def parse_integer(self, value, field_type):
    # integers (and bigints, via build_parsemap) are coerced to long.
    # NOTE(review): `long` is the Python 2 builtin — verify a py3 alias.
    return long(value)
2040
def parse_double(self, value, field_type):
    """Coerce a stored float/double value to a Python float."""
    return float(value)
2043
def parse_json(self, value, field_type):
    # If the engine has a native JSON type the driver already returns
    # a decoded object; otherwise the column holds a JSON string that
    # must be deserialized here.
    if not self.native_json:
        if not isinstance(value, basestring):
            raise RuntimeError('json data not a string')
        if isinstance(value, unicode):
            value = value.encode('utf-8')
        if have_serializers:
            # prefer web2py's own serializers when available
            value = serializers.loads_json(value)
        elif simplejson:
            value = simplejson.loads(value)
        else:
            raise RuntimeError("missing simplejson")
    return value
2057
def build_parsemap(self):
    """Populate self.parsemap: field-type name -> bound parser method.

    parse_value() looks parsers up here after stripping any '(...)'
    qualifier from the declared field type.
    """
    parsers = dict(
        id=self.parse_id,
        integer=self.parse_integer,
        bigint=self.parse_integer,
        float=self.parse_double,
        double=self.parse_double,
        reference=self.parse_reference,
        boolean=self.parse_boolean,
        date=self.parse_date,
        time=self.parse_time,
        datetime=self.parse_datetime,
        blob=self.parse_blob,
        decimal=self.parse_decimal,
        json=self.parse_json,
    )
    parsers['list:integer'] = self.parse_list_integers
    parsers['list:reference'] = self.parse_list_references
    parsers['list:string'] = self.parse_list_strings
    self.parsemap = parsers
2077
def parse(self, rows, fields, colnames, blob_decode=True,
          cacheable = False):
    # Convert raw driver rows into a Rows object of nested Row records,
    # parsing each column value and attaching the DAL conveniences
    # (update_record/delete_record, lazy references, virtual fields).
    db = self.db
    virtualtables = []
    new_rows = []
    tmps = []
    # pre-resolve table/field metadata once per column; None marks
    # columns that are not plain 'table.field' selectors (expressions)
    for colname in colnames:
        if not REGEX_TABLE_DOT_FIELD.match(colname):
            tmps.append(None)
        else:
            (tablename, _the_sep_, fieldname) = colname.partition('.')
            table = db[tablename]
            field = table[fieldname]
            ft = field.type
            tmps.append((tablename,fieldname,table,field,ft))
    for (i,row) in enumerate(rows):
        new_row = Row()
        for (j,colname) in enumerate(colnames):
            value = row[j]
            tmp = tmps[j]
            if tmp:
                (tablename,fieldname,table,field,ft) = tmp
                # one sub-Row per table appearing in the select
                if tablename in new_row:
                    colset = new_row[tablename]
                else:
                    colset = new_row[tablename] = Row()
                    if tablename not in virtualtables:
                        virtualtables.append(tablename)
                value = self.parse_value(value,ft,blob_decode)
                if field.filter_out:
                    value = field.filter_out(value)
                colset[fieldname] = value

                # for backward compatibility
                if ft=='id' and fieldname!='id' and \
                        not 'id' in table.fields:
                    colset['id'] = value

                if ft == 'id' and not cacheable:
                    # temporary hack to deal with
                    # GoogleDatastoreAdapter
                    # references
                    if isinstance(self, GoogleDatastoreAdapter):
                        id = value.key().id_or_name()
                        colset[fieldname] = id
                        colset.gae_item = value
                    else:
                        id = value
                    colset.update_record = RecordUpdater(colset,table,id)
                    colset.delete_record = RecordDeleter(table,id)
                    if table._db._lazy_tables:
                        colset['__get_lazy_reference__'] = LazyReferenceGetter(table, id)
                    # expose reverse references (children) as lazy sets
                    for rfield in table._referenced_by:
                        referee_link = db._referee_name and \
                            db._referee_name % dict(
                            table=rfield.tablename,field=rfield.name)
                        if referee_link and not referee_link in colset:
                            colset[referee_link] = LazySet(rfield,id)
            else:
                # expression / aliased columns land in row._extra
                if not '_extra' in new_row:
                    new_row['_extra'] = Row()
                new_row['_extra'][colname] = \
                    self.parse_value(value,
                                     fields[j].type,blob_decode)
                new_column_name = \
                    REGEX_SELECT_AS_PARSER.search(colname)
                if not new_column_name is None:
                    # also expose 'expr AS name' under row.name
                    column_name = new_column_name.groups(0)
                    setattr(new_row,column_name[0],value)
        new_rows.append(new_row)
    rowsobj = Rows(db, new_rows, colnames, rawrows=rows)

    # attach new-style virtual (computed) and lazy (method) fields
    for tablename in virtualtables:
        table = db[tablename]
        fields_virtual = [(f,v) for (f,v) in table.iteritems()
                          if isinstance(v,FieldVirtual)]
        fields_lazy = [(f,v) for (f,v) in table.iteritems()
                       if isinstance(v,FieldMethod)]
        if fields_virtual or fields_lazy:
            for row in rowsobj.records:
                box = row[tablename]
                for f,v in fields_virtual:
                    try:
                        box[f] = v.f(row)
                    except AttributeError:
                        pass # not enough fields to define virtual field
                for f,v in fields_lazy:
                    try:
                        box[f] = (v.handler or VirtualCommand)(v.f,row)
                    except AttributeError:
                        pass # not enough fields to define virtual field

        ### old style virtual fields
        for item in table.virtualfields:
            try:
                rowsobj = rowsobj.setvirtualfields(**{tablename:item})
            except (KeyError, AttributeError):
                # to avoid breaking virtualfields when partial select
                pass
    return rowsobj
2179
def common_filter(self, query, tablenames):
    # Augment `query` with per-table filters:
    #  - table._common_filter (user supplied callable), and
    #  - a multi-tenant filter on the request-tenant field.
    # NOTE(review): if query is None and a table defines a
    # _common_filter, `query & ...` below would operate on None —
    # presumably callers never pass None in that case; verify.
    tenant_fieldname = self.db._request_tenant

    for tablename in tablenames:
        table = self.db[tablename]

        # deal with user provided filters
        if table._common_filter != None:
            query = query & table._common_filter(query)

        # deal with multi_tenant filters
        if tenant_fieldname in table:
            default = table[tenant_fieldname].default
            if not default is None:
                newquery = table[tenant_fieldname] == default
                if query is None:
                    query = newquery
                else:
                    query = query & newquery
    return query
2200
def CASE(self,query,t,f):
    # Build an SQL 'CASE WHEN cond THEN t ELSE f END' expression.
    # t and f may be Expressions (expanded as-is) or plain Python
    # values, represented with a field type inferred from their
    # Python type (bool/int/float; anything else as string).
    def represent(x):
        types = {type(True):'boolean',type(0):'integer',type(1.0):'double'}
        if x is None: return 'NULL'
        elif isinstance(x,Expression): return str(x)
        else: return self.represent(x,types.get(type(x),'string'))
    return Expression(self.db,'CASE WHEN %s THEN %s ELSE %s END' % \
                          (self.expand(query),represent(t),represent(f)))
###################################################################################
# List of all the available adapters; they all extend BaseAdapter.
###################################################################################

class SQLiteAdapter(BaseAdapter):
    # Adapter for SQLite databases (sqlite2/sqlite3 drivers).
    drivers = ('sqlite2','sqlite3')

    can_select_for_update = None # support ourselves with BEGIN TRANSACTION

    def EXTRACT(self,field,what):
        # SQLite has no EXTRACT(); delegate to the web2py_extract()
        # SQL function registered in after_connection().
        return "web2py_extract('%s',%s)" % (what, self.expand(field))

    @staticmethod
    def web2py_extract(lookup, s):
        """Extract a date/time component from ISO string `s`.

        `lookup` is year/month/day/hour/minute/second (sliced out of
        the ISO layout below) or 'epoch' (seconds since the Unix
        epoch).  Returns None on malformed input.
        """
        table = {
            'year': (0, 4),
            'month': (5, 7),
            'day': (8, 10),
            'hour': (11, 13),
            'minute': (14, 16),
            'second': (17, 19),
            }
        try:
            if lookup != 'epoch':
                (i, j) = table[lookup]
                return int(s[i:j])
            else:
                return time.mktime(datetime.datetime.strptime(s, '%Y-%m-%d %H:%M:%S').timetuple())
        except:
            return None

    @staticmethod
    def web2py_regexp(expression, item):
        # backing function for SQLite's REGEXP operator
        return re.compile(expression).search(item) is not None

    def __init__(self, db, uri, pool_size=0, folder=None, db_codec ='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        # NOTE(review): driver_args is a mutable default and is mutated
        # below, so the default dict is shared across instances —
        # verify this sharing is intended.
        self.db = db
        self.dbengine = "sqlite"
        self.uri = uri
        if do_connect: self.find_driver(adapter_args)
        self.pool_size = 0  # sqlite connections are never pooled
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.find_or_make_work_folder()
        path_encoding = sys.getfilesystemencoding() \
            or locale.getdefaultlocale()[1] or 'utf8'
        if uri.startswith('sqlite:memory'):
            self.dbpath = ':memory:'
        else:
            self.dbpath = uri.split('://',1)[1]
            if self.dbpath[0] != '/':
                # relative paths are resolved against the working folder
                if PYTHON_VERSION == 2:
                    self.dbpath = pjoin(
                        self.folder.decode(path_encoding).encode('utf8'), self.dbpath)
                else:
                    self.dbpath = pjoin(self.folder, self.dbpath)
        if not 'check_same_thread' in driver_args:
            # allow the pooled connection to be used from other threads
            driver_args['check_same_thread'] = False
        if not 'detect_types' in driver_args and do_connect:
            driver_args['detect_types'] = self.driver.PARSE_DECLTYPES
        def connector(dbpath=self.dbpath, driver_args=driver_args):
            return self.driver.Connection(dbpath, **driver_args)
        self.connector = connector
        if do_connect: self.reconnect()

    def after_connection(self):
        # register the Python helpers used by EXTRACT and REGEXP
        self.connection.create_function('web2py_extract', 2,
                                        SQLiteAdapter.web2py_extract)
        self.connection.create_function("REGEXP", 2,
                                        SQLiteAdapter.web2py_regexp)

    def _truncate(self, table, mode=''):
        # SQLite has no TRUNCATE: delete rows and reset autoincrement
        tablename = table._tablename
        return ['DELETE FROM %s;' % tablename,
                "DELETE FROM sqlite_sequence WHERE name='%s';" % tablename]

    def lastrowid(self, table):
        # sqlite exposes the last insert id directly on the cursor
        return self.cursor.lastrowid

    def REGEXP(self,first,second):
        return '(%s REGEXP %s)' % (self.expand(first),
                                   self.expand(second,'string'))

    def select(self, query, fields, attributes):
        """
        Simulate SELECT ... FOR UPDATE with BEGIN IMMEDIATE TRANSACTION.
        Note that the entire database, rather than one record, is locked
        (it will be locked eventually anyway by the following UPDATE).
        """
        if attributes.get('for_update', False) and not 'cache' in attributes:
            self.execute('BEGIN IMMEDIATE TRANSACTION;')
        return super(SQLiteAdapter, self).select(query, fields, attributes)
class SpatiaLiteAdapter(SQLiteAdapter):
    # SQLite with the SpatiaLite extension loaded: adds the GEOMETRY
    # type and spatial functions on top of the plain SQLite adapter.
    drivers = ('sqlite3','sqlite2')

    types = copy.copy(BaseAdapter.types)
    types.update(geometry='GEOMETRY')

    def __init__(self, db, uri, pool_size=0, folder=None, db_codec ='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, srid=4326, after_connection=None):
        # NOTE(review): driver_args is a mutable default and is mutated
        # below, sharing state across instances — verify intended.
        self.db = db
        self.dbengine = "spatialite"
        self.uri = uri
        if do_connect: self.find_driver(adapter_args)
        self.pool_size = 0
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.find_or_make_work_folder()
        self.srid = srid  # default spatial reference id for geometry fields
        path_encoding = sys.getfilesystemencoding() \
            or locale.getdefaultlocale()[1] or 'utf8'
        if uri.startswith('spatialite:memory'):
            self.dbpath = ':memory:'
        else:
            self.dbpath = uri.split('://',1)[1]
            if self.dbpath[0] != '/':
                # NOTE(review): unlike SQLiteAdapter.__init__ there is no
                # PYTHON_VERSION branch here — confirm Python 3 support.
                self.dbpath = pjoin(
                    self.folder.decode(path_encoding).encode('utf8'), self.dbpath)
        if not 'check_same_thread' in driver_args:
            driver_args['check_same_thread'] = False
        if not 'detect_types' in driver_args and do_connect:
            driver_args['detect_types'] = self.driver.PARSE_DECLTYPES
        def connector(dbpath=self.dbpath, driver_args=driver_args):
            return self.driver.Connection(dbpath, **driver_args)
        self.connector = connector
        if do_connect: self.reconnect()

    def after_connection(self):
        # load the SpatiaLite shared library into the connection, then
        # register the same helper functions as the SQLite adapter
        self.connection.enable_load_extension(True)
        # for Windows, rename libspatialite-2.dll to libspatialite.dll
        # Linux uses libspatialite.so
        # Mac OS X uses libspatialite.dylib
        libspatialite = SPATIALLIBS[platform.system()]
        self.execute(r'SELECT load_extension("%s");' % libspatialite)

        self.connection.create_function('web2py_extract', 2,
                                        SQLiteAdapter.web2py_extract)
        self.connection.create_function("REGEXP", 2,
                                        SQLiteAdapter.web2py_regexp)

    # GIS functions

    def ST_ASGEOJSON(self, first, second):
        # `second` is a dict with 'precision' and 'options' keys
        return 'AsGeoJSON(%s,%s,%s)' %(self.expand(first),
                                       second['precision'], second['options'])

    def ST_ASTEXT(self, first):
        return 'AsText(%s)' %(self.expand(first))

    def ST_CONTAINS(self, first, second):
        return 'Contains(%s,%s)' %(self.expand(first),
                                   self.expand(second, first.type))

    def ST_DISTANCE(self, first, second):
        return 'Distance(%s,%s)' %(self.expand(first),
                                   self.expand(second, first.type))

    def ST_EQUALS(self, first, second):
        return 'Equals(%s,%s)' %(self.expand(first),
                                 self.expand(second, first.type))

    def ST_INTERSECTS(self, first, second):
        return 'Intersects(%s,%s)' %(self.expand(first),
                                     self.expand(second, first.type))

    def ST_OVERLAPS(self, first, second):
        return 'Overlaps(%s,%s)' %(self.expand(first),
                                   self.expand(second, first.type))

    def ST_SIMPLIFY(self, first, second):
        return 'Simplify(%s,%s)' %(self.expand(first),
                                   self.expand(second, 'double'))

    def ST_TOUCHES(self, first, second):
        return 'Touches(%s,%s)' %(self.expand(first),
                                  self.expand(second, first.type))

    def ST_WITHIN(self, first, second):
        return 'Within(%s,%s)' %(self.expand(first),
                                 self.expand(second, first.type))

    def represent(self, obj, fieldtype):
        # Geometry values are rendered via ST_GeomFromText with the
        # srid taken from the field type, e.g. 'geometry(schema,4326)';
        # everything else defers to the base implementation.
        field_is_type = fieldtype.startswith
        if field_is_type('geo'):
            srid = 4326 # Spatialite default srid for geometry
            geotype, parms = fieldtype[:-1].split('(')
            parms = parms.split(',')
            if len(parms) >= 2:
                schema, srid = parms[:2]
#             if field_is_type('geometry'):
            value = "ST_GeomFromText('%s',%s)" %(obj, srid)
#             elif field_is_type('geography'):
#                 value = "ST_GeogFromText('SRID=%s;%s')" %(srid, obj)
#             else:
#                 raise SyntaxError, 'Invalid field type %s' %fieldtype
            return value
        return BaseAdapter.represent(self, obj, fieldtype)
class JDBCSQLiteAdapter(SQLiteAdapter):
    # SQLite accessed through the Jython zxJDBC bridge.
    drivers = ('zxJDBC_sqlite',)

    def __init__(self, db, uri, pool_size=0, folder=None, db_codec='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        self.db = db
        self.dbengine = "sqlite"
        self.uri = uri
        if do_connect: self.find_driver(adapter_args)
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.find_or_make_work_folder()
        path_encoding = sys.getfilesystemencoding() \
            or locale.getdefaultlocale()[1] or 'utf8'
        if uri.startswith('sqlite:memory'):
            self.dbpath = ':memory:'
        else:
            self.dbpath = uri.split('://',1)[1]
            if self.dbpath[0] != '/':
                self.dbpath = pjoin(
                    self.folder.decode(path_encoding).encode('utf8'), self.dbpath)
        def connector(dbpath=self.dbpath,driver_args=driver_args):
            # connect through the JDBC driver manager
            return self.driver.connect(
                self.driver.getConnection('jdbc:sqlite:'+dbpath),
                **driver_args)
        self.connector = connector
        if do_connect: self.reconnect()

    def after_connection(self):
        # FIXME http://www.zentus.com/sqlitejdbc/custom_functions.html for UDFs
        # NOTE(review): only web2py_extract is registered here — REGEXP is
        # not available through zxJDBC; verify this is a JDBC limitation.
        self.connection.create_function('web2py_extract', 2,
                                        SQLiteAdapter.web2py_extract)

    def execute(self, a):
        # no adapter-side cursor juggling needed; just log and run
        return self.log_execute(a)
class MySQLAdapter(BaseAdapter):
    """Adapter for MySQL (MySQLdb, pymysql or mysql-connector drivers)."""
    drivers = ('MySQLdb','pymysql', 'mysqlconnector')

    commit_on_alter_table = True
    support_distributed_transaction = True
    types = {
        'boolean': 'CHAR(1)',
        'string': 'VARCHAR(%(length)s)',
        'text': 'LONGTEXT',
        'json': 'LONGTEXT',
        'password': 'VARCHAR(%(length)s)',
        'blob': 'LONGBLOB',
        'upload': 'VARCHAR(%(length)s)',
        'integer': 'INT',
        'bigint': 'BIGINT',
        'float': 'FLOAT',
        'double': 'DOUBLE',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'DATE',
        'time': 'TIME',
        'datetime': 'DATETIME',
        'id': 'INT AUTO_INCREMENT NOT NULL',
        'reference': 'INT, INDEX %(index_name)s (%(field_name)s), FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'LONGTEXT',
        'list:string': 'LONGTEXT',
        'list:reference': 'LONGTEXT',
        'big-id': 'BIGINT AUTO_INCREMENT NOT NULL',
        'big-reference': 'BIGINT, INDEX %(index_name)s (%(field_name)s), FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        }

    QUOTE_TEMPLATE = "`%s`"

    def varquote(self,name):
        # backtick-quote identifiers only when necessary
        return varquote_aux(name,'`%s`')

    def RANDOM(self):
        return 'RAND()'

    def SUBSTRING(self,field,parameters):
        # parameters is (start, length)
        return 'SUBSTRING(%s,%s,%s)' % (self.expand(field),
                                        parameters[0], parameters[1])

    def EPOCH(self, first):
        return "UNIX_TIMESTAMP(%s)" % self.expand(first)

    def CONCAT(self, *items):
        return 'CONCAT(%s)' % ','.join(self.expand(x,'string') for x in items)

    def REGEXP(self,first,second):
        return '(%s REGEXP %s)' % (self.expand(first),
                                   self.expand(second,'string'))

    def _drop(self,table,mode):
        # breaks db integrity but without this mysql does not drop table
        return ['SET FOREIGN_KEY_CHECKS=0;','DROP TABLE %s;' % table,
                'SET FOREIGN_KEY_CHECKS=1;']

    def _insert_empty(self, table):
        return 'INSERT INTO %s VALUES (DEFAULT);' % table

    # distributed (XA) transaction support

    def distributed_transaction_begin(self,key):
        self.execute('XA START;')

    def prepare(self,key):
        self.execute("XA END;")
        self.execute("XA PREPARE;")

    def commit_prepared(self,key):
        # fixed: parameter was misspelled 'ley'; renamed to 'key' for
        # consistency with prepare()/rollback_prepared()
        self.execute("XA COMMIT;")

    def rollback_prepared(self,key):
        self.execute("XA ROLLBACK;")

    REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>[^?]+)(\?set_encoding=(?P<charset>\w+))?$')

    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        """Parse a mysql:// URI and set up the connection factory.

        URI form: mysql://user:password@host:port/db?set_encoding=charset
        (port defaults to 3306, charset to utf8).
        """
        self.db = db
        self.dbengine = "mysql"
        self.uri = uri
        if do_connect: self.find_driver(adapter_args,uri)
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.find_or_make_work_folder()
        ruri = uri.split('://',1)[1]
        m = self.REGEX_URI.match(ruri)
        if not m:
            raise SyntaxError(
                "Invalid URI string in DAL: %s" % self.uri)
        user = credential_decoder(m.group('user'))
        if not user:
            raise SyntaxError('User required')
        password = credential_decoder(m.group('password'))
        if not password:
            password = ''
        host = m.group('host')
        if not host:
            raise SyntaxError('Host name required')
        db = m.group('db')
        if not db:
            raise SyntaxError('Database name required')
        port = int(m.group('port') or '3306')
        charset = m.group('charset') or 'utf8'
        # fixed: copy driver_args before mutating, so the shared mutable
        # default (and any caller-owned dict) is not polluted with this
        # connection's credentials
        driver_args = dict(driver_args)
        # NOTE(review): user/password were already passed through
        # credential_decoder above and are decoded a second time here;
        # kept for backward compatibility — verify the decoder is
        # idempotent.
        driver_args.update(db=db,
                           user=credential_decoder(user),
                           passwd=credential_decoder(password),
                           host=host,
                           port=port,
                           charset=charset)

        def connector(driver_args=driver_args):
            return self.driver.connect(**driver_args)
        self.connector = connector
        if do_connect: self.reconnect()

    def after_connection(self):
        # enforce referential integrity and standard string escaping
        self.execute('SET FOREIGN_KEY_CHECKS=1;')
        self.execute("SET sql_mode='NO_BACKSLASH_ESCAPES';")

    def lastrowid(self,table):
        self.execute('select last_insert_id();')
        return int(self.cursor.fetchone()[0])
class PostgreSQLAdapter(BaseAdapter):
    # Adapter for PostgreSQL via psycopg2 or pg8000.
    drivers = ('psycopg2','pg8000')

    support_distributed_transaction = True
    types = {
        'boolean': 'CHAR(1)',
        'string': 'VARCHAR(%(length)s)',
        'text': 'TEXT',
        'json': 'TEXT',
        'password': 'VARCHAR(%(length)s)',
        'blob': 'BYTEA',
        'upload': 'VARCHAR(%(length)s)',
        'integer': 'INTEGER',
        'bigint': 'BIGINT',
        'float': 'FLOAT',
        'double': 'FLOAT8',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'DATE',
        'time': 'TIME',
        'datetime': 'TIMESTAMP',
        'id': 'SERIAL PRIMARY KEY',
        'reference': 'INTEGER REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'TEXT',
        'list:string': 'TEXT',
        'list:reference': 'TEXT',
        'geometry': 'GEOMETRY',
        'geography': 'GEOGRAPHY',
        'big-id': 'BIGSERIAL PRIMARY KEY',
        'big-reference': 'BIGINT REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s',

        }

    QUOTE_TEMPLATE = '%s'  # identifiers are not quoted by default

    def varquote(self,name):
        # double-quote identifiers only when necessary
        return varquote_aux(name,'"%s"')

    def adapt(self,obj):
        # Quote/escape a Python value as an SQL literal, using the
        # driver's own machinery when available (psycopg2).
        if self.driver_name == 'psycopg2':
            return psycopg2_adapt(obj).getquoted()
        elif self.driver_name == 'pg8000':
            return "'%s'" % str(obj).replace("%","%%").replace("'","''")
        else:
            return "'%s'" % str(obj).replace("'","''")

    def sequence_name(self,table):
        return '%s_id_Seq' % table

    def RANDOM(self):
        return 'RANDOM()'

    def ADD(self, first, second):
        # '+' means concatenation (||) for text-like types in Postgres
        t = first.type
        if t in ('text','string','password', 'json', 'upload','blob'):
            return '(%s || %s)' % (self.expand(first), self.expand(second, t))
        else:
            return '(%s + %s)' % (self.expand(first), self.expand(second, t))

    # distributed (two-phase commit) transaction support

    def distributed_transaction_begin(self,key):
        return

    def prepare(self,key):
        self.execute("PREPARE TRANSACTION '%s';" % key)

    def commit_prepared(self,key):
        self.execute("COMMIT PREPARED '%s';" % key)

    def rollback_prepared(self,key):
        self.execute("ROLLBACK PREPARED '%s';" % key)

    def create_sequence_and_triggers(self, query, table, **args):
        # following lines should only be executed if table._sequence_name does not exist
        # self.execute('CREATE SEQUENCE %s;' % table._sequence_name)
        # self.execute("ALTER TABLE %s ALTER COLUMN %s SET DEFAULT NEXTVAL('%s');" \
        #              % (table._tablename, table._fieldname, table._sequence_name))
        self.execute(query)

    REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:@]+)(\:(?P<port>[0-9]+))?/(?P<db>[^\?]+)(\?sslmode=(?P<sslmode>.+))?$')

    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, srid=4326,
                 after_connection=None):
        # Parse postgres://user:password@host:port/db?sslmode=... and
        # build a DSN-string based connection factory.
        self.db = db
        self.dbengine = "postgres"
        self.uri = uri
        if do_connect: self.find_driver(adapter_args,uri)
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.srid = srid  # default spatial reference id for GIS fields
        self.find_or_make_work_folder()
        ruri = uri.split('://',1)[1]
        m = self.REGEX_URI.match(ruri)
        if not m:
            raise SyntaxError("Invalid URI string in DAL")
        user = credential_decoder(m.group('user'))
        if not user:
            raise SyntaxError('User required')
        password = credential_decoder(m.group('password'))
        if not password:
            password = ''
        host = m.group('host')
        if not host:
            raise SyntaxError('Host name required')
        db = m.group('db')
        if not db:
            raise SyntaxError('Database name required')
        port = m.group('port') or '5432'
        sslmode = m.group('sslmode')
        if sslmode:
            msg = ("dbname='%s' user='%s' host='%s' "
                   "port=%s password='%s' sslmode='%s'") \
                   % (db, user, host, port, password, sslmode)
        else:
            msg = ("dbname='%s' user='%s' host='%s' "
                   "port=%s password='%s'") \
                   % (db, user, host, port, password)
        # choose diver according uri
        if self.driver:
            self.__version__ = "%s %s" % (self.driver.__name__,
                                          self.driver.__version__)
        else:
            self.__version__ = None
        def connector(msg=msg,driver_args=driver_args):
            return self.driver.connect(msg,**driver_args)
        self.connector = connector
        if do_connect: self.reconnect()

    def after_connection(self):
        self.connection.set_client_encoding('UTF8')
        self.execute("SET standard_conforming_strings=on;")
        self.try_json()

    def lastrowid(self,table):
        # read the current value of the table's id sequence
        self.execute("select currval('%s')" % table._sequence_name)
        return int(self.cursor.fetchone()[0])

    def try_json(self):
        # check JSON data type support
        # (to be added to after_connection)
        # NOTE(review): the pg8000/zxJDBC branches compare version
        # numbers as strings, which orders lexicographically (e.g.
        # '10.0' < '9.2.0') — verify against supported server versions.
        if self.driver_name == "pg8000":
            supports_json = self.connection.server_version >= "9.2.0"
        elif (self.driver_name == "psycopg2") and \
             (self.driver.__version__ >= "2.0.12"):
            supports_json = self.connection.server_version >= 90200
        elif self.driver_name == "zxJDBC":
            supports_json = self.connection.dbversion >= "9.2.0"
        else: supports_json = None
        if supports_json:
            self.types["json"] = "JSON"
            self.native_json = True
        else: LOGGER.debug("Your database version does not support the JSON data type (using TEXT instead)")

    def LIKE(self,first,second):
        # cast non-text columns to CHAR before LIKE
        args = (self.expand(first), self.expand(second,'string'))
        if not first.type in ('string', 'text', 'json'):
            return '(%s LIKE %s)' % (
                self.CAST(args[0], 'CHAR(%s)' % first.length), args[1])
        else:
            return '(%s LIKE %s)' % args

    def ILIKE(self,first,second):
        # case-insensitive LIKE.  NOTE(review): the cast branch emits
        # LIKE, not ILIKE, so non-text columns match case-sensitively —
        # verify this asymmetry is intended.
        args = (self.expand(first), self.expand(second,'string'))
        if not first.type in ('string', 'text', 'json'):
            return '(%s LIKE %s)' % (
                self.CAST(args[0], 'CHAR(%s)' % first.length), args[1])
        else:
            return '(%s ILIKE %s)' % args

    def REGEXP(self,first,second):
        # '~' is Postgres' regex-match operator
        return '(%s ~ %s)' % (self.expand(first),
                              self.expand(second,'string'))

    def STARTSWITH(self,first,second):
        return '(%s ILIKE %s)' % (self.expand(first),
                                  self.expand(second+'%','string'))

    def ENDSWITH(self,first,second):
        return '(%s ILIKE %s)' % (self.expand(first),
                                  self.expand('%'+second,'string'))

    # GIS functions

    def ST_ASGEOJSON(self, first, second):
        """
        http://postgis.org/docs/ST_AsGeoJSON.html
        """
        return 'ST_AsGeoJSON(%s,%s,%s,%s)' %(second['version'],
            self.expand(first), second['precision'], second['options'])

    def ST_ASTEXT(self, first):
        """
        http://postgis.org/docs/ST_AsText.html
        """
        return 'ST_AsText(%s)' %(self.expand(first))

    def ST_X(self, first):
        """
        http://postgis.org/docs/ST_X.html
        """
        return 'ST_X(%s)' %(self.expand(first))

    def ST_Y(self, first):
        """
        http://postgis.org/docs/ST_Y.html
        """
        return 'ST_Y(%s)' %(self.expand(first))

    def ST_CONTAINS(self, first, second):
        """
        http://postgis.org/docs/ST_Contains.html
        """
        return 'ST_Contains(%s,%s)' %(self.expand(first), self.expand(second, first.type))

    def ST_DISTANCE(self, first, second):
        """
        http://postgis.org/docs/ST_Distance.html
        """
        return 'ST_Distance(%s,%s)' %(self.expand(first), self.expand(second, first.type))

    def ST_EQUALS(self, first, second):
        """
        http://postgis.org/docs/ST_Equals.html
        """
        return 'ST_Equals(%s,%s)' %(self.expand(first), self.expand(second, first.type))

    def ST_INTERSECTS(self, first, second):
        """
        http://postgis.org/docs/ST_Intersects.html
        """
        return 'ST_Intersects(%s,%s)' %(self.expand(first), self.expand(second, first.type))

    def ST_OVERLAPS(self, first, second):
        """
        http://postgis.org/docs/ST_Overlaps.html
        """
        return 'ST_Overlaps(%s,%s)' %(self.expand(first), self.expand(second, first.type))

    def ST_SIMPLIFY(self, first, second):
        """
        http://postgis.org/docs/ST_Simplify.html
        """
        return 'ST_Simplify(%s,%s)' %(self.expand(first), self.expand(second, 'double'))

    def ST_TOUCHES(self, first, second):
        """
        http://postgis.org/docs/ST_Touches.html
        """
        return 'ST_Touches(%s,%s)' %(self.expand(first), self.expand(second, first.type))

    def ST_WITHIN(self, first, second):
        """
        http://postgis.org/docs/ST_Within.html
        """
        return 'ST_Within(%s,%s)' %(self.expand(first), self.expand(second, first.type))

    def represent(self, obj, fieldtype):
        # Geometry/geography values go through the PostGIS text
        # constructors; everything else defers to the base adapter.
        field_is_type = fieldtype.startswith
        if field_is_type('geo'):
            srid = 4326 # postGIS default srid for geometry
            geotype, parms = fieldtype[:-1].split('(')
            parms = parms.split(',')
            if len(parms) >= 2:
                schema, srid = parms[:2]
            if field_is_type('geometry'):
                value = "ST_GeomFromText('%s',%s)" %(obj, srid)
            elif field_is_type('geography'):
                value = "ST_GeogFromText('SRID=%s;%s')" %(srid, obj)
#             else:
#                 raise SyntaxError('Invalid field type %s' %fieldtype)
            return value
        return BaseAdapter.represent(self, obj, fieldtype)
class NewPostgreSQLAdapter(PostgreSQLAdapter):
    """PostgreSQL adapter that maps the list: field types onto native
    arrays (BIGINT[]/TEXT[]) instead of bar-encoded TEXT columns."""
    drivers = ('psycopg2','pg8000')

    types = {
        'boolean': 'CHAR(1)',
        'string': 'VARCHAR(%(length)s)',
        'text': 'TEXT',
        'json': 'TEXT',
        'password': 'VARCHAR(%(length)s)',
        'blob': 'BYTEA',
        'upload': 'VARCHAR(%(length)s)',
        'integer': 'INTEGER',
        'bigint': 'BIGINT',
        'float': 'FLOAT',
        'double': 'FLOAT8',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'DATE',
        'time': 'TIME',
        'datetime': 'TIMESTAMP',
        'id': 'SERIAL PRIMARY KEY',
        'reference': 'INTEGER REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'BIGINT[]',
        'list:string': 'TEXT[]',
        'list:reference': 'BIGINT[]',
        'geometry': 'GEOMETRY',
        'geography': 'GEOGRAPHY',
        'big-id': 'BIGSERIAL PRIMARY KEY',
        'big-reference': 'BIGINT REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        }

    def parse_list_integers(self, value, field_type):
        """The driver already returns native arrays as Python lists."""
        return value

    def parse_list_references(self, value, field_type):
        """Wrap each element of a native id array in a Reference."""
        referee_type = field_type[5:]
        return [self.parse_reference(item, referee_type) for item in value]

    def parse_list_strings(self, value, field_type):
        """The driver already returns native arrays as Python lists."""
        return value

    def represent(self, obj, fieldtype):
        """Render list: values as ARRAY[...] literals; defer everything
        else to the generic base representation."""
        if not fieldtype.startswith('list:'):
            return BaseAdapter.represent(self, obj, fieldtype)
        if not obj:
            items = []
        elif isinstance(obj, (list, tuple)):
            items = obj
        else:
            items = [obj]
        caster = str if fieldtype.startswith('list:string') else int
        items = [caster(item) for item in items]
        return 'ARRAY[%s]' % ','.join(repr(item) for item in items)
class JDBCPostgreSQLAdapter(PostgreSQLAdapter):
    """
    PostgreSQL adapter for Jython, connecting through the zxJDBC bridge
    with the 'jdbc:postgresql://host:port/db' URL form.
    """
    drivers = ('zxJDBC',)

    # Parses 'user:password@host:port/db' (the part after 'scheme://').
    REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>.+)$')

    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None ):
        self.db = db
        self.dbengine = "postgres"
        self.uri = uri
        if do_connect: self.find_driver(adapter_args,uri)
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.find_or_make_work_folder()
        # Strip the 'scheme://' prefix before parsing credentials/host/db.
        ruri = uri.split('://',1)[1]
        m = self.REGEX_URI.match(ruri)
        if not m:
            raise SyntaxError("Invalid URI string in DAL")
        user = credential_decoder(m.group('user'))
        if not user:
            raise SyntaxError('User required')
        password = credential_decoder(m.group('password'))
        if not password:
            password = ''
        host = m.group('host')
        if not host:
            raise SyntaxError('Host name required')
        db = m.group('db')
        if not db:
            raise SyntaxError('Database name required')
        port = m.group('port') or '5432'
        # zxJDBC positional connect args: (jdbc url, user, password)
        msg = ('jdbc:postgresql://%s:%s/%s' % (host, port, db), user, password)
        def connector(msg=msg,driver_args=driver_args):
            return self.driver.connect(*msg,**driver_args)
        self.connector = connector
        if do_connect: self.reconnect()

    def after_connection(self):
        # Force UTF-8 client encoding and open a transaction, then probe
        # for native json support (try_json is inherited).
        self.connection.set_client_encoding('UTF8')
        self.execute('BEGIN;')
        self.execute("SET CLIENT_ENCODING TO 'UNICODE';")
        self.try_json()
2959
class OracleAdapter(BaseAdapter):
    """
    Adapter for Oracle via the cx_Oracle driver.

    Oracle has no autoincrement columns, so each table gets a sequence
    plus a BEFORE INSERT trigger (see create_sequence_and_triggers);
    CLOB literals are rewritten into bind variables in execute().
    """
    drivers = ('cx_Oracle',)

    commit_on_alter_table = False
    types = {
        'boolean': 'CHAR(1)',
        'string': 'VARCHAR2(%(length)s)',
        'text': 'CLOB',
        'json': 'CLOB',
        'password': 'VARCHAR2(%(length)s)',
        'blob': 'CLOB',
        'upload': 'VARCHAR2(%(length)s)',
        'integer': 'INT',
        'bigint': 'NUMBER',
        'float': 'FLOAT',
        'double': 'BINARY_DOUBLE',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'DATE',
        'time': 'CHAR(8)',
        'datetime': 'DATE',
        'id': 'NUMBER PRIMARY KEY',
        'reference': 'NUMBER, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'CLOB',
        'list:string': 'CLOB',
        'list:reference': 'CLOB',
        'big-id': 'NUMBER PRIMARY KEY',
        'big-reference': 'NUMBER, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s',
        }

    def sequence_name(self,tablename):
        # Name of the sequence that feeds a table's id column.
        return '%s_sequence' % tablename

    def trigger_name(self,tablename):
        # Name of the BEFORE INSERT trigger that applies the sequence.
        return '%s_trigger' % tablename

    def LEFT_JOIN(self):
        return 'LEFT OUTER JOIN'

    def RANDOM(self):
        # Oracle's random ordering expression.
        return 'dbms_random.value'

    def NOT_NULL(self,default,field_type):
        return 'DEFAULT %s NOT NULL' % self.represent(default,field_type)

    def _drop(self,table,mode):
        # Dropping a table must also drop its backing sequence.
        sequence_name = table._sequence_name
        return ['DROP TABLE %s %s;' % (table, mode), 'DROP SEQUENCE %s;' % sequence_name]

    def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
        # Oracle (pre-12c) has no LIMIT/OFFSET; emulate it with a nested
        # ROWNUM-windowed subquery: inner query capped at lmax, outer
        # query skips the first lmin rows via the w_row alias.
        if limitby:
            (lmin, lmax) = limitby
            if len(sql_w) > 1:
                sql_w_row = sql_w + ' AND w_row > %i' % lmin
            else:
                sql_w_row = 'WHERE w_row > %i' % lmin
            return 'SELECT %s %s FROM (SELECT w_tmp.*, ROWNUM w_row FROM (SELECT %s FROM %s%s%s) w_tmp WHERE ROWNUM<=%i) %s %s %s;' % (sql_s, sql_f, sql_f, sql_t, sql_w, sql_o, lmax, sql_t, sql_w_row, sql_o)
        return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)

    def constraint_name(self, tablename, fieldname):
        # Oracle identifiers are limited to 30 characters; shorten the
        # generated constraint name when it would exceed that.
        constraint_name = BaseAdapter.constraint_name(self, tablename, fieldname)
        if len(constraint_name)>30:
            constraint_name = '%s_%s__constraint' % (tablename[:10], fieldname[:7])
        return constraint_name

    def represent_exceptions(self, obj, fieldtype):
        """
        Oracle-specific SQL rendering for blob/date/datetime values.
        Returns an SQL fragment, or None when the generic representation
        should be used instead.
        """
        if fieldtype == 'blob':
            obj = base64.b64encode(str(obj))
            # The :CLOB(...) pseudo-literal is rewritten into a bind
            # variable by execute() via the oracle_fix regex below.
            return ":CLOB('%s')" % obj
        elif fieldtype == 'date':
            if isinstance(obj, (datetime.date, datetime.datetime)):
                obj = obj.isoformat()[:10]
            else:
                obj = str(obj)
            return "to_date('%s','yyyy-mm-dd')" % obj
        elif fieldtype == 'datetime':
            if isinstance(obj, datetime.datetime):
                obj = obj.isoformat()[:19].replace('T',' ')
            elif isinstance(obj, datetime.date):
                obj = obj.isoformat()[:10]+' 00:00:00'
            else:
                obj = str(obj)
            return "to_date('%s','yyyy-mm-dd hh24:mi:ss')" % obj
        return None

    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        self.db = db
        self.dbengine = "oracle"
        self.uri = uri
        if do_connect: self.find_driver(adapter_args,uri)
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.find_or_make_work_folder()
        # Everything after 'oracle://' is passed straight to cx_Oracle.
        ruri = uri.split('://',1)[1]
        if not 'threaded' in driver_args:
            driver_args['threaded']=True
        def connector(uri=ruri,driver_args=driver_args):
            return self.driver.connect(uri,**driver_args)
        self.connector = connector
        if do_connect: self.reconnect()

    def after_connection(self):
        # Pin session date formats so literal to_date-free comparisons work.
        self.execute("ALTER SESSION SET NLS_DATE_FORMAT = 'YYYY-MM-DD HH24:MI:SS';")
        self.execute("ALTER SESSION SET NLS_TIMESTAMP_FORMAT = 'YYYY-MM-DD HH24:MI:SS';")

    # Matches the next :CLOB('...') pseudo-literal outside quoted strings,
    # so execute() can replace it with a numbered bind variable.
    oracle_fix = re.compile("[^']*('[^']*'[^']*)*\:(?P<clob>CLOB\('([^']+|'')*'\))")

    def execute(self, command, args=None):
        """
        Execute command, first rewriting every :CLOB('...') pseudo-literal
        into a positional bind variable (:1, :2, ...) with the decoded CLOB
        payload appended to args; also strips a trailing semicolon, which
        cx_Oracle rejects.
        """
        args = args or []
        i = 1
        while True:
            m = self.oracle_fix.match(command)
            if not m:
                break
            command = command[:m.start('clob')] + str(i) + command[m.end('clob'):]
            # Strip the CLOB('...') wrapper and undo SQL quote doubling.
            args.append(m.group('clob')[6:-2].replace("''", "'"))
            i += 1
        if command[-1:]==';':
            command = command[:-1]
        return self.log_execute(command, args)

    def create_sequence_and_triggers(self, query, table, **args):
        # Emulate autoincrement ids: create the table, its sequence, and a
        # BEFORE INSERT trigger that pulls the next value (realigning the
        # sequence when an explicit id was supplied).
        tablename = table._tablename
        id_name = table._id.name
        sequence_name = table._sequence_name
        trigger_name = table._trigger_name
        self.execute(query)
        self.execute('CREATE SEQUENCE %s START WITH 1 INCREMENT BY 1 NOMAXVALUE MINVALUE -1;' % sequence_name)
        self.execute("""
        CREATE OR REPLACE TRIGGER %(trigger_name)s BEFORE INSERT ON %(tablename)s FOR EACH ROW
        DECLARE
            curr_val NUMBER;
            diff_val NUMBER;
            PRAGMA autonomous_transaction;
        BEGIN
            IF :NEW.%(id)s IS NOT NULL THEN
                EXECUTE IMMEDIATE 'SELECT %(sequence_name)s.nextval FROM dual' INTO curr_val;
                diff_val := :NEW.%(id)s - curr_val - 1;
                IF diff_val != 0 THEN
                  EXECUTE IMMEDIATE 'alter sequence %(sequence_name)s increment by '|| diff_val;
                  EXECUTE IMMEDIATE 'SELECT %(sequence_name)s.nextval FROM dual' INTO curr_val;
                  EXECUTE IMMEDIATE 'alter sequence %(sequence_name)s increment by 1';
                END IF;
            END IF;
            SELECT %(sequence_name)s.nextval INTO :NEW.%(id)s FROM DUAL;
        END;
        """ % dict(trigger_name=trigger_name, tablename=tablename,
                   sequence_name=sequence_name,id=id_name))

    def lastrowid(self,table):
        # currval of the table's sequence is the id of the last insert.
        sequence_name = table._sequence_name
        self.execute('SELECT %s.currval FROM dual;' % sequence_name)
        return long(self.cursor.fetchone()[0])

    #def parse_value(self, value, field_type, blob_decode=True):
    #    if blob_decode and isinstance(value, cx_Oracle.LOB):
    #        try:
    #            value = value.read()
    #        except self.driver.ProgrammingError:
    #            # After a subsequent fetch the LOB value is not valid anymore
    #            pass
    #    return BaseAdapter.parse_value(self, value, field_type, blob_decode)

    def _fetchall(self):
        # Materialize CLOB columns eagerly: LOB handles become invalid
        # after a subsequent fetch.
        # NOTE(review): references the module-level cx_Oracle import,
        # not self.driver — presumably fails if only an alternate driver
        # module is importable; confirm against the driver loading code.
        if any(x[1]==cx_Oracle.CLOB for x in self.cursor.description):
            return [tuple([(c.read() if type(c) == cx_Oracle.LOB else c) \
                           for c in r]) for r in self.cursor]
        else:
            return self.cursor.fetchall()
3135
class MSSQLAdapter(BaseAdapter):
    """
    Adapter for Microsoft SQL Server via pyodbc.

    Accepted URI forms:
        mssql://user:password@host:port/db?arg1=value1&arg2=value2
        mssql://dsn              (no '@' in the tail: treated as an ODBC DSN)
    """
    drivers = ('pyodbc',)
    T_SEP = 'T'

    QUOTE_TEMPLATE = "[%s]"

    types = {
        'boolean': 'BIT',
        'string': 'VARCHAR(%(length)s)',
        'text': 'TEXT',
        'json': 'TEXT',
        'password': 'VARCHAR(%(length)s)',
        'blob': 'IMAGE',
        'upload': 'VARCHAR(%(length)s)',
        'integer': 'INT',
        'bigint': 'BIGINT',
        'float': 'FLOAT',
        'double': 'FLOAT',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'DATETIME',
        'time': 'CHAR(8)',
        'datetime': 'DATETIME',
        'id': 'INT IDENTITY PRIMARY KEY',
        'reference': 'INT NULL, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'TEXT',
        'list:string': 'TEXT',
        'list:reference': 'TEXT',
        'geometry': 'geometry',
        'geography': 'geography',
        'big-id': 'BIGINT IDENTITY PRIMARY KEY',
        'big-reference': 'BIGINT NULL, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s',
        }

    def concat_add(self,tablename):
        # MSSQL adds columns with 'ALTER TABLE t ADD col ...' (no COLUMN keyword)
        return '; ALTER TABLE %s ADD ' % tablename

    def varquote(self,name):
        # Quote identifiers with square brackets.
        return varquote_aux(name,'[%s]')

    def EXTRACT(self,field,what):
        return "DATEPART(%s,%s)" % (what, self.expand(field))

    def LEFT_JOIN(self):
        return 'LEFT OUTER JOIN'

    def RANDOM(self):
        return 'NEWID()'

    def ALLOW_NULL(self):
        return ' NULL'

    def CAST(self, first, second):
        return first # apparently no cast necessary in MSSQL

    def SUBSTRING(self,field,parameters):
        return 'SUBSTRING(%s,%s,%s)' % (self.expand(field), parameters[0], parameters[1])

    def PRIMARY_KEY(self,key):
        return 'PRIMARY KEY CLUSTERED (%s)' % key

    def AGGREGATE(self, first, what):
        # MSSQL spells string length LEN, not LENGTH.
        if what == 'LENGTH':
            what = 'LEN'
        return "%s(%s)" % (what, self.expand(first))

    def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
        # Only TOP is supported here; the offset (lmin) is applied in
        # Python by rowslice below.
        if limitby:
            (lmin, lmax) = limitby
            sql_s += ' TOP %i' % lmax
        return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)

    TRUE = 1
    FALSE = 0

    REGEX_DSN = re.compile('^(?P<dsn>.+)$')
    REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>[^\?]+)(\?(?P<urlargs>.*))?$')
    REGEX_ARGPATTERN = re.compile('(?P<argkey>[^=]+)=(?P<argvalue>[^&]*)')

    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, srid=4326,
                 after_connection=None):
        self.db = db
        self.dbengine = "mssql"
        self.uri = uri
        if do_connect: self.find_driver(adapter_args,uri)
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.srid = srid
        self.find_or_make_work_folder()
        # ## read: http://bytes.com/groups/python/460325-cx_oracle-utf8
        ruri = uri.split('://',1)[1]
        if '@' not in ruri:
            # No credentials: the whole tail is an ODBC DSN.
            try:
                m = self.REGEX_DSN.match(ruri)
                if not m:
                    raise SyntaxError(
                        'Parsing uri string(%s) has no result' % self.uri)
                dsn = m.group('dsn')
                if not dsn:
                    raise SyntaxError('DSN required')
            except SyntaxError:
                e = sys.exc_info()[1]
                LOGGER.error('NdGpatch error')
                raise e
            # was cnxn = 'DSN=%s' % dsn
            cnxn = dsn
        else:
            m = self.REGEX_URI.match(ruri)
            if not m:
                raise SyntaxError(
                    "Invalid URI string in DAL: %s" % self.uri)
            user = credential_decoder(m.group('user'))
            if not user:
                raise SyntaxError('User required')
            password = credential_decoder(m.group('password'))
            if not password:
                password = ''
            host = m.group('host')
            if not host:
                raise SyntaxError('Host name required')
            db = m.group('db')
            if not db:
                raise SyntaxError('Database name required')
            port = m.group('port') or '1433'
            # Parse the optional url name-value arg pairs after the '?'
            # (in the form of arg1=value1&arg2=value2&...)
            # Default values (drivers like FreeTDS insist on uppercase parameter keys)
            argsdict = { 'DRIVER':'{SQL Server}' }
            urlargs = m.group('urlargs') or ''
            for argmatch in self.REGEX_ARGPATTERN.finditer(urlargs):
                argsdict[str(argmatch.group('argkey')).upper()] = argmatch.group('argvalue')
            urlargs = ';'.join(['%s=%s' % (ak, av) for (ak, av) in argsdict.iteritems()])
            cnxn = 'SERVER=%s;PORT=%s;DATABASE=%s;UID=%s;PWD=%s;%s' \
                % (host, port, db, user, password, urlargs)
        def connector(cnxn=cnxn,driver_args=driver_args):
            return self.driver.connect(cnxn,**driver_args)
        self.connector = connector
        if do_connect: self.reconnect()

    def lastrowid(self,table):
        #self.execute('SELECT @@IDENTITY;')
        self.execute('SELECT SCOPE_IDENTITY();')
        return long(self.cursor.fetchone()[0])

    def rowslice(self,rows,minimum=0,maximum=None):
        # Apply in Python the offset that select_limitby could not express.
        if maximum is None:
            return rows[minimum:]
        return rows[minimum:maximum]

    def EPOCH(self, first):
        return "DATEDIFF(second, '1970-01-01 00:00:00', %s)" % self.expand(first)

    def CONCAT(self, *items):
        return '(%s)' % ' + '.join(self.expand(x,'string') for x in items)

    # GIS Spatial Extensions

    # No STAsGeoJSON in MSSQL

    def ST_ASTEXT(self, first):
        return '%s.STAsText()' %(self.expand(first))

    def ST_CONTAINS(self, first, second):
        return '%s.STContains(%s)=1' %(self.expand(first), self.expand(second, first.type))

    def ST_DISTANCE(self, first, second):
        return '%s.STDistance(%s)' %(self.expand(first), self.expand(second, first.type))

    def ST_EQUALS(self, first, second):
        return '%s.STEquals(%s)=1' %(self.expand(first), self.expand(second, first.type))

    def ST_INTERSECTS(self, first, second):
        return '%s.STIntersects(%s)=1' %(self.expand(first), self.expand(second, first.type))

    def ST_OVERLAPS(self, first, second):
        return '%s.STOverlaps(%s)=1' %(self.expand(first), self.expand(second, first.type))

    # no STSimplify in MSSQL

    def ST_TOUCHES(self, first, second):
        return '%s.STTouches(%s)=1' %(self.expand(first), self.expand(second, first.type))

    def ST_WITHIN(self, first, second):
        return '%s.STWithin(%s)=1' %(self.expand(first), self.expand(second, first.type))

    def represent(self, obj, fieldtype):
        """
        Render obj as an SQL literal; 'geometry(...)'/'geography(...)'
        fields become STGeomFromText calls, everything else is delegated
        to BaseAdapter.represent.

        BUGFIX: the geography branch previously tested
        `fieldtype == 'geography'`, so real 'geography(srid)' field types
        fell through to BaseAdapter.represent, while a bare 'geography'
        crashed on a 2-way unpack of split('('); it also contained an
        unreachable duplicate return.  Both branches now use startswith
        and tolerate a missing '(...)' part by keeping the default srid.
        """
        field_is_type = fieldtype.startswith
        if field_is_type('geometry'):
            srid = 0 # MS SQL default srid for geometry
            parms = fieldtype[:-1].split('(')[1:]
            if parms:
                srid = parms[0]
            return "geometry::STGeomFromText('%s',%s)" %(obj, srid)
        elif field_is_type('geography'):
            srid = 4326 # MS SQL default srid for geography
            parms = fieldtype[:-1].split('(')[1:]
            if parms:
                srid = parms[0]
            return "geography::STGeomFromText('%s',%s)" %(obj, srid)
        return BaseAdapter.represent(self, obj, fieldtype)
3345
class MSSQL3Adapter(MSSQLAdapter):
    """ experimental support for pagination in MSSQL"""

    def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
        # Emulates LIMIT/OFFSET: a plain TOP when the offset is zero,
        # otherwise a ROW_NUMBER() window wrapped in a derived table.
        if limitby:
            (lmin, lmax) = limitby
            if lmin == 0:
                sql_s += ' TOP %i' % lmax
                return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)
            # BETWEEN is inclusive: shift the lower bound by one.
            lmin += 1
            # Split sql_o into the ORDER BY expression and whatever precedes it.
            # NOTE(review): assumes sql_o contains 'ORDER BY '; if it does not,
            # find() returns -1 and these slices are wrong — confirm callers
            # always supply an ORDER BY with a non-zero offset.
            sql_o_inner = sql_o[sql_o.find('ORDER BY ')+9:]
            sql_g_inner = sql_o[:sql_o.find('ORDER BY ')]
            # Alias each selected column f_0, f_1, ... so the outer query can
            # reference them without re-evaluating expressions.
            sql_f_outer = ['f_%s' % f for f in range(len(sql_f.split(',')))]
            sql_f_inner = [f for f in sql_f.split(',')]
            sql_f_iproxy = ['%s AS %s' % (o, n) for (o, n) in zip(sql_f_inner, sql_f_outer)]
            sql_f_iproxy = ', '.join(sql_f_iproxy)
            sql_f_oproxy = ', '.join(sql_f_outer)
            return 'SELECT %s %s FROM (SELECT %s ROW_NUMBER() OVER (ORDER BY %s) AS w_row, %s FROM %s%s%s) TMP WHERE w_row BETWEEN %i AND %s;' % (sql_s,sql_f_oproxy,sql_s,sql_f,sql_f_iproxy,sql_t,sql_w,sql_g_inner,lmin,lmax)
        return 'SELECT %s %s FROM %s%s%s;' % (sql_s,sql_f,sql_t,sql_w,sql_o)

    def rowslice(self,rows,minimum=0,maximum=None):
        # Slicing already happened in SQL; return rows untouched.
        return rows
3367
class MSSQL2Adapter(MSSQLAdapter):
    """
    MSSQL adapter that stores text in national (unicode) column types
    (NVARCHAR/NTEXT) and prefixes text literals with N.
    """
    drivers = ('pyodbc',)

    types = {
        'boolean': 'CHAR(1)',
        'string': 'NVARCHAR(%(length)s)',
        'text': 'NTEXT',
        'json': 'NTEXT',
        'password': 'NVARCHAR(%(length)s)',
        'blob': 'IMAGE',
        'upload': 'NVARCHAR(%(length)s)',
        'integer': 'INT',
        'bigint': 'BIGINT',
        'float': 'FLOAT',
        'double': 'FLOAT',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'DATETIME',
        'time': 'CHAR(8)',
        'datetime': 'DATETIME',
        'id': 'INT IDENTITY PRIMARY KEY',
        'reference': 'INT, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'NTEXT',
        'list:string': 'NTEXT',
        'list:reference': 'NTEXT',
        'big-id': 'BIGINT IDENTITY PRIMARY KEY',
        'big-reference': 'BIGINT, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s',
        }

    def represent(self, obj, fieldtype):
        """Render obj as SQL; quoted text literals get the N unicode prefix."""
        rendered = BaseAdapter.represent(self, obj, fieldtype)
        is_text = fieldtype in ('string', 'text', 'json')
        if is_text and rendered[:1] == "'":
            rendered = 'N' + rendered
        return rendered

    def execute(self, a):
        # Statements are built as byte strings; decode to unicode for pyodbc.
        command = a.decode('utf8')
        return self.log_execute(command)
3407
class VerticaAdapter(MSSQLAdapter):
    """
    Adapter for HP Vertica (pyodbc).  Reuses MSSQL SQL generation but
    overrides date extraction, truncation, pagination and id retrieval.
    """
    drivers = ('pyodbc',)
    T_SEP = ' '

    types = {
        'boolean': 'BOOLEAN',
        'string': 'VARCHAR(%(length)s)',
        'text': 'BYTEA',
        'json': 'VARCHAR(%(length)s)',
        'password': 'VARCHAR(%(length)s)',
        'blob': 'BYTEA',
        'upload': 'VARCHAR(%(length)s)',
        'integer': 'INT',
        'bigint': 'BIGINT',
        'float': 'FLOAT',
        'double': 'DOUBLE PRECISION',
        'decimal': 'DECIMAL(%(precision)s,%(scale)s)',
        'date': 'DATE',
        'time': 'TIME',
        'datetime': 'DATETIME',
        'id': 'IDENTITY',
        'reference': 'INT REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'BYTEA',
        'list:string': 'BYTEA',
        'list:reference': 'BYTEA',
        'big-reference': 'BIGINT REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        }

    def EXTRACT(self, first, what):
        """Extract a date/time part via Vertica's DATE_PART."""
        expanded = self.expand(first)
        return "DATE_PART('%s', TIMESTAMP %s)" % (what, expanded)

    def _truncate(self, table, mode=''):
        """Return the TRUNCATE statement(s) for table."""
        return ['TRUNCATE %s %s;' % (table._tablename, mode or '')]

    def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
        """Append LIMIT/OFFSET when a (min, max) row window is requested."""
        if limitby:
            lower, upper = limitby
            sql_o += ' LIMIT %i OFFSET %i' % (upper - lower, lower)
        return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)

    def lastrowid(self, table):
        """Return the id generated by the most recent INSERT."""
        self.execute('SELECT LAST_INSERT_ID();')
        row = self.cursor.fetchone()
        return long(row[0])

    def execute(self, a):
        return self.log_execute(a)
3457
class SybaseAdapter(MSSQLAdapter):
    """
    Adapter for Sybase (python-sybase driver).  SQL generation is
    inherited from MSSQLAdapter; only connection setup differs.

    Accepted URI forms:
        sybase://user:password@host:port/db
        sybase://dsn             (no '@' in the tail: treated as a DSN)
    """
    drivers = ('Sybase',)

    types = {
        'boolean': 'BIT',
        'string': 'CHAR VARYING(%(length)s)',
        'text': 'TEXT',
        'json': 'TEXT',
        'password': 'CHAR VARYING(%(length)s)',
        'blob': 'IMAGE',
        'upload': 'CHAR VARYING(%(length)s)',
        'integer': 'INT',
        'bigint': 'BIGINT',
        'float': 'FLOAT',
        'double': 'FLOAT',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'DATETIME',
        'time': 'CHAR(8)',
        'datetime': 'DATETIME',
        'id': 'INT IDENTITY PRIMARY KEY',
        'reference': 'INT NULL, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'TEXT',
        'list:string': 'TEXT',
        'list:reference': 'TEXT',
        'geometry': 'geometry',
        'geography': 'geography',
        'big-id': 'BIGINT IDENTITY PRIMARY KEY',
        'big-reference': 'BIGINT NULL, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s',
        }

    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, srid=4326,
                 after_connection=None):
        self.db = db
        self.dbengine = "sybase"
        self.uri = uri
        if do_connect: self.find_driver(adapter_args,uri)
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.srid = srid
        self.find_or_make_work_folder()
        # ## read: http://bytes.com/groups/python/460325-cx_oracle-utf8
        ruri = uri.split('://',1)[1]
        if '@' not in ruri:
            # No credentials: the whole tail is a DSN.
            try:
                m = self.REGEX_DSN.match(ruri)
                if not m:
                    raise SyntaxError(
                        'Parsing uri string(%s) has no result' % self.uri)
                dsn = m.group('dsn')
                if not dsn:
                    raise SyntaxError('DSN required')
            except SyntaxError:
                e = sys.exc_info()[1]
                LOGGER.error('NdGpatch error')
                raise e
        else:
            # BUGFIX: match against ruri (scheme stripped), not the full uri;
            # matching the full uri made REGEX_URI capture 'sybase' as the
            # user and mis-parse the credentials.
            m = self.REGEX_URI.match(ruri)
            if not m:
                raise SyntaxError(
                    "Invalid URI string in DAL: %s" % self.uri)
            user = credential_decoder(m.group('user'))
            if not user:
                raise SyntaxError('User required')
            password = credential_decoder(m.group('password'))
            if not password:
                password = ''
            host = m.group('host')
            if not host:
                raise SyntaxError('Host name required')
            db = m.group('db')
            if not db:
                raise SyntaxError('Database name required')
            port = m.group('port') or '1433'

            dsn = 'sybase:host=%s:%s;dbname=%s' % (host,port,db)

            # BUGFIX: user/password already went through credential_decoder
            # above; do not decode them a second time.
            driver_args.update(user = user, password = password)

        def connector(dsn=dsn,driver_args=driver_args):
            return self.driver.connect(dsn,**driver_args)
        self.connector = connector
        if do_connect: self.reconnect()
3548
class FireBirdAdapter(BaseAdapter):
    """
    Adapter for Firebird (kinterbasdb/firebirdsql/fdb/pyodbc).

    Firebird has no autoincrement columns, so each table gets a generator
    (sequence) plus a BEFORE INSERT trigger; see
    create_sequence_and_triggers.
    """
    drivers = ('kinterbasdb','firebirdsql','fdb','pyodbc')

    commit_on_alter_table = False
    support_distributed_transaction = True
    types = {
        'boolean': 'CHAR(1)',
        'string': 'VARCHAR(%(length)s)',
        'text': 'BLOB SUB_TYPE 1',
        'json': 'BLOB SUB_TYPE 1',
        'password': 'VARCHAR(%(length)s)',
        'blob': 'BLOB SUB_TYPE 0',
        'upload': 'VARCHAR(%(length)s)',
        'integer': 'INTEGER',
        'bigint': 'BIGINT',
        'float': 'FLOAT',
        'double': 'DOUBLE PRECISION',
        'decimal': 'DECIMAL(%(precision)s,%(scale)s)',
        'date': 'DATE',
        'time': 'TIME',
        'datetime': 'TIMESTAMP',
        'id': 'INTEGER PRIMARY KEY',
        'reference': 'INTEGER REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'BLOB SUB_TYPE 1',
        'list:string': 'BLOB SUB_TYPE 1',
        'list:reference': 'BLOB SUB_TYPE 1',
        'big-id': 'BIGINT PRIMARY KEY',
        'big-reference': 'BIGINT REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        }

    def sequence_name(self,tablename):
        # Name of the generator backing a table's id column.
        return 'genid_%s' % tablename

    def trigger_name(self,tablename):
        # Name of the BEFORE INSERT trigger that applies the generator.
        return 'trg_id_%s' % tablename

    def RANDOM(self):
        return 'RAND()'

    def EPOCH(self, first):
        return "DATEDIFF(second, '1970-01-01 00:00:00', %s)" % self.expand(first)

    def NOT_NULL(self,default,field_type):
        return 'DEFAULT %s NOT NULL' % self.represent(default,field_type)

    def SUBSTRING(self,field,parameters):
        return 'SUBSTRING(%s from %s for %s)' % (self.expand(field), parameters[0], parameters[1])

    def LENGTH(self, first):
        return "CHAR_LENGTH(%s)" % self.expand(first)

    def CONTAINS(self,first,second,case_sensitive=False):
        # For list: fields the serialized value is |-delimited; escape
        # literal '|' in the needle and wrap it in delimiters so only
        # whole items match.  Firebird's CONTAINING is case-insensitive.
        if first.type.startswith('list:'):
            second = Expression(None,self.CONCAT('|',Expression(
                        None,self.REPLACE(second,('|','||'))),'|'))
        return '(%s CONTAINING %s)' % (self.expand(first),
                                       self.expand(second, 'string'))

    def _drop(self,table,mode):
        # Dropping a table must also drop its generator.
        sequence_name = table._sequence_name
        return ['DROP TABLE %s %s;' % (table, mode), 'DROP GENERATOR %s;' % sequence_name]

    def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
        # Firebird pagination uses FIRST <count> SKIP <offset>.
        if limitby:
            (lmin, lmax) = limitby
            sql_s = ' FIRST %i SKIP %i %s' % (lmax - lmin, lmin, sql_s)
        return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)

    def _truncate(self,table,mode = ''):
        # No TRUNCATE in Firebird: delete all rows and reset the generator.
        return ['DELETE FROM %s;' % table._tablename,
                'SET GENERATOR %s TO 0;' % table._sequence_name]

    # Parses 'user:password@host:port/db?set_encoding=charset'.
    REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>.+?)(\?set_encoding=(?P<charset>\w+))?$')

    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        self.db = db
        self.dbengine = "firebird"
        self.uri = uri
        if do_connect: self.find_driver(adapter_args,uri)
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.find_or_make_work_folder()
        ruri = uri.split('://',1)[1]
        m = self.REGEX_URI.match(ruri)
        if not m:
            raise SyntaxError("Invalid URI string in DAL: %s" % self.uri)
        user = credential_decoder(m.group('user'))
        if not user:
            raise SyntaxError('User required')
        password = credential_decoder(m.group('password'))
        if not password:
            password = ''
        host = m.group('host')
        if not host:
            raise SyntaxError('Host name required')
        port = int(m.group('port') or 3050)
        db = m.group('db')
        if not db:
            raise SyntaxError('Database name required')
        charset = m.group('charset') or 'UTF8'
        # NOTE(review): user/password were already passed through
        # credential_decoder above and are decoded again here — harmless
        # with the default IDENTITY decoder, but double-applied otherwise.
        driver_args.update(dsn='%s/%s:%s' % (host,port,db),
                           user = credential_decoder(user),
                           password = credential_decoder(password),
                           charset = charset)

        def connector(driver_args=driver_args):
            return self.driver.connect(**driver_args)
        self.connector = connector
        if do_connect: self.reconnect()

    def create_sequence_and_triggers(self, query, table, **args):
        # Emulate autoincrement ids: create the table, its generator, and
        # a BEFORE INSERT trigger that fills id when it is NULL.
        tablename = table._tablename
        sequence_name = table._sequence_name
        trigger_name = table._trigger_name
        self.execute(query)
        self.execute('create generator %s;' % sequence_name)
        self.execute('set generator %s to 0;' % sequence_name)
        self.execute('create trigger %s for %s active before insert position 0 as\nbegin\nif(new.id is null) then\nbegin\nnew.id = gen_id(%s, 1);\nend\nend;' % (trigger_name, tablename, sequence_name))

    def lastrowid(self,table):
        # gen_id(seq, 0) reads the generator's current value.
        sequence_name = table._sequence_name
        self.execute('SELECT gen_id(%s, 0) FROM rdb$database' % sequence_name)
        return long(self.cursor.fetchone()[0])
3677
class FireBirdEmbeddedAdapter(FireBirdAdapter):
    """
    Firebird Embedded adapter: connects to a database file path instead
    of host/port.  URI form:
        firebird_embedded://user:password@/path/to/db?set_encoding=charset
    """
    drivers = ('kinterbasdb','firebirdsql','fdb','pyodbc')

    # Parses 'user:password@path?set_encoding=charset'.
    REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<path>[^\?]+)(\?set_encoding=(?P<charset>\w+))?$')

    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        self.db = db
        self.dbengine = "firebird"
        self.uri = uri
        if do_connect: self.find_driver(adapter_args,uri)
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.find_or_make_work_folder()
        ruri = uri.split('://',1)[1]
        m = self.REGEX_URI.match(ruri)
        if not m:
            raise SyntaxError(
                "Invalid URI string in DAL: %s" % self.uri)
        user = credential_decoder(m.group('user'))
        if not user:
            raise SyntaxError('User required')
        password = credential_decoder(m.group('password'))
        if not password:
            password = ''
        pathdb = m.group('path')
        if not pathdb:
            raise SyntaxError('Path required')
        charset = m.group('charset')
        if not charset:
            charset = 'UTF8'
        # Embedded mode: empty host, database is the file path.
        host = ''
        # NOTE(review): user/password are run through credential_decoder a
        # second time here — harmless with the default IDENTITY decoder.
        driver_args.update(host=host,
                           database=pathdb,
                           user=credential_decoder(user),
                           password=credential_decoder(password),
                           charset=charset)

        def connector(driver_args=driver_args):
            return self.driver.connect(**driver_args)
        self.connector = connector
        if do_connect: self.reconnect()
3724
class InformixAdapter(BaseAdapter):
    """DAL adapter for IBM Informix via the informixdb driver.

    URI form: informix://user:password@host/database
    The host and database are folded into an informixdb DSN of the
    form 'database@host'.
    """
    drivers = ('informixdb',)

    # Mapping of DAL field types to Informix column type declarations.
    types = {
        'boolean': 'CHAR(1)',
        'string': 'VARCHAR(%(length)s)',
        'text': 'BLOB SUB_TYPE 1',
        'json': 'BLOB SUB_TYPE 1',
        'password': 'VARCHAR(%(length)s)',
        'blob': 'BLOB SUB_TYPE 0',
        'upload': 'VARCHAR(%(length)s)',
        'integer': 'INTEGER',
        'bigint': 'BIGINT',
        'float': 'FLOAT',
        'double': 'DOUBLE PRECISION',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'DATE',
        'time': 'CHAR(8)',
        'datetime': 'DATETIME',
        'id': 'SERIAL',
        'reference': 'INTEGER REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'BLOB SUB_TYPE 1',
        'list:string': 'BLOB SUB_TYPE 1',
        'list:reference': 'BLOB SUB_TYPE 1',
        'big-id': 'BIGSERIAL',
        'big-reference': 'BIGINT REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference FK': 'REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s CONSTRAINT FK_%(table_name)s_%(field_name)s',
        'reference TFK': 'FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s CONSTRAINT TFK_%(table_name)s_%(field_name)s',
        }

    def RANDOM(self):
        """SQL expression yielding a random value (used for random ordering)."""
        return 'Random()'

    def NOT_NULL(self, default, field_type):
        """Informix wants DEFAULT before NOT NULL in a column definition."""
        return 'DEFAULT %s NOT NULL' % self.represent(default, field_type)

    def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
        """Render a SELECT, emulating limitby with Informix SKIP/FIRST.

        SKIP (offset) requires Informix 10.0+, FIRST (row count) 9.0+;
        the server major version is read from the live connection.
        """
        if limitby:
            (lmin, lmax) = limitby
            fetch_amt = lmax - lmin
            dbms_version = int(self.connection.dbms_version.split('.')[0])
            if lmin and (dbms_version >= 10):
                # Requires Informix 10.0+
                sql_s += ' SKIP %d' % (lmin, )
            if fetch_amt and (dbms_version >= 9):
                # Requires Informix 9.0+
                sql_s += ' FIRST %d' % (fetch_amt, )
        return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)

    def represent_exceptions(self, obj, fieldtype):
        """Literal rendering overrides for date/datetime values.

        Returns an SQL literal string, or None to fall back to the
        default representation.
        """
        if fieldtype == 'date':
            if isinstance(obj, (datetime.date, datetime.datetime)):
                obj = obj.isoformat()[:10]
            else:
                obj = str(obj)
            return "to_date('%s','%%Y-%%m-%%d')" % obj
        elif fieldtype == 'datetime':
            if isinstance(obj, datetime.datetime):
                obj = obj.isoformat()[:19].replace('T',' ')
            elif isinstance(obj, datetime.date):
                obj = obj.isoformat()[:10]+' 00:00:00'
            else:
                obj = str(obj)
            return "to_date('%s','%%Y-%%m-%%d %%H:%%M:%%S')" % obj
        return None

    REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>.+)$')

    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        self.db = db
        self.dbengine = "informix"
        self.uri = uri
        if do_connect: self.find_driver(adapter_args,uri)
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.find_or_make_work_folder()
        ruri = uri.split('://',1)[1]
        m = self.REGEX_URI.match(ruri)
        if not m:
            raise SyntaxError(
                "Invalid URI string in DAL: %s" % self.uri)
        # FIX: decode each credential exactly once.  The previous code ran
        # credential_decoder over user/password a second time further down,
        # which corrupts them for any non-identity decoder.
        user = credential_decoder(m.group('user'))
        if not user:
            raise SyntaxError('User required')
        password = credential_decoder(m.group('password'))
        if not password:
            password = ''
        host = m.group('host')
        if not host:
            raise SyntaxError('Host name required')
        db = m.group('db')
        if not db:
            raise SyntaxError('Database name required')
        dsn = '%s@%s' % (db,host)
        # FIX: copy driver_args instead of mutating it in place -- the
        # default argument is a shared mutable dict, so in-place update()
        # leaked credentials between adapter instances.
        driver_args = dict(driver_args, user=user, password=password,
                           autocommit=True)
        def connector(dsn=dsn,driver_args=driver_args):
            return self.driver.connect(dsn,**driver_args)
        self.connector = connector
        if do_connect: self.reconnect()

    def execute(self, command):
        """Strip a trailing semicolon before handing the statement to the driver."""
        if command[-1:]==';':
            command = command[:-1]
        return self.log_execute(command)

    def lastrowid(self, table):
        # sqlerrd[1] carries the SERIAL value of the last insert
        # (informixdb cursor convention).
        return self.cursor.sqlerrd[1]
3838
class InformixSEAdapter(InformixAdapter):
    """Adapter for Informix Standard Engine (work in progress).

    No SKIP/FIRST clauses are emitted: select_limitby() ignores the
    limitby argument and the slicing is performed client-side in
    rowslice() instead.
    """

    def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
        # limitby is deliberately not rendered into SQL here; see rowslice().
        return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)

    def rowslice(self, rows, minimum=0, maximum=None):
        """Client-side emulation of the limitby offset/limit."""
        return rows[minimum:] if maximum is None else rows[minimum:maximum]
3850
class DB2Adapter(BaseAdapter):
    """DAL adapter for IBM DB2 via pyodbc.

    Everything after 'db2://' in the URI is passed verbatim to
    pyodbc's connect() as an ODBC connection string.
    """
    drivers = ('pyodbc',)

    # Mapping of DAL field types to DB2 column type declarations.
    types = {
        'boolean': 'CHAR(1)',
        'string': 'VARCHAR(%(length)s)',
        'text': 'CLOB',
        'json': 'CLOB',
        'password': 'VARCHAR(%(length)s)',
        'blob': 'BLOB',
        'upload': 'VARCHAR(%(length)s)',
        'integer': 'INT',
        'bigint': 'BIGINT',
        'float': 'REAL',
        'double': 'DOUBLE',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'DATE',
        'time': 'TIME',
        'datetime': 'TIMESTAMP',
        'id': 'INT GENERATED ALWAYS AS IDENTITY PRIMARY KEY NOT NULL',
        'reference': 'INT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'CLOB',
        'list:string': 'CLOB',
        'list:reference': 'CLOB',
        'big-id': 'BIGINT GENERATED ALWAYS AS IDENTITY PRIMARY KEY NOT NULL',
        'big-reference': 'BIGINT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s',
        }

    def LEFT_JOIN(self):
        return 'LEFT OUTER JOIN'

    def RANDOM(self):
        return 'RAND()'

    def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
        # Only the upper bound is rendered (FETCH FIRST lmax ROWS); lmin is
        # not applied in SQL here.  NOTE(review): the offset appears to be
        # handled client-side via rowslice() below -- confirm against callers.
        if limitby:
            (lmin, lmax) = limitby
            sql_o += ' FETCH FIRST %i ROWS ONLY' % lmax
        return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)

    def represent_exceptions(self, obj, fieldtype):
        """Literal rendering overrides for blob/datetime values.

        Returns an SQL literal string, or None to fall back to the
        default representation.
        """
        if fieldtype == 'blob':
            obj = base64.b64encode(str(obj))
            return "BLOB('%s')" % obj
        elif fieldtype == 'datetime':
            # Renders timestamps as yyyy-mm-dd-hh.mm.ss
            if isinstance(obj, datetime.datetime):
                obj = obj.isoformat()[:19].replace('T','-').replace(':','.')
            elif isinstance(obj, datetime.date):
                obj = obj.isoformat()[:10]+'-00.00.00'
            return "'%s'" % obj
        return None

    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        self.db = db
        self.dbengine = "db2"
        self.uri = uri
        if do_connect: self.find_driver(adapter_args,uri)
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.find_or_make_work_folder()
        # Everything after the scheme is the raw ODBC connection string.
        ruri = uri.split('://', 1)[1]
        def connector(cnxn=ruri,driver_args=driver_args):
            return self.driver.connect(cnxn,**driver_args)
        self.connector = connector
        if do_connect: self.reconnect()

    def execute(self,command):
        # Strip a trailing semicolon before handing the statement to the driver.
        if command[-1:]==';':
            command = command[:-1]
        return self.log_execute(command)

    def lastrowid(self,table):
        """Return the identity value generated by the last INSERT on this connection."""
        self.execute('SELECT DISTINCT IDENTITY_VAL_LOCAL() FROM %s;' % table)
        return long(self.cursor.fetchone()[0])

    def rowslice(self,rows,minimum=0,maximum=None):
        """Slice the fetched rows client-side (used to emulate the limitby offset)."""
        if maximum is None:
            return rows[minimum:]
        return rows[minimum:maximum]
3936
class TeradataAdapter(BaseAdapter):
    """DAL adapter for Teradata via pyodbc.

    Everything after 'teradata://' in the URI is passed verbatim to
    pyodbc's connect() as an ODBC connection string.
    """
    drivers = ('pyodbc',)

    # Mapping of DAL field types to Teradata column type declarations.
    types = {
        'boolean': 'CHAR(1)',
        'string': 'VARCHAR(%(length)s)',
        'text': 'CLOB',
        'json': 'CLOB',
        'password': 'VARCHAR(%(length)s)',
        'blob': 'BLOB',
        'upload': 'VARCHAR(%(length)s)',
        'integer': 'INT',
        'bigint': 'BIGINT',
        'float': 'REAL',
        'double': 'DOUBLE',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'DATE',
        'time': 'TIME',
        'datetime': 'TIMESTAMP',
        # Modified Constraint syntax for Teradata.
        # Teradata does not support ON DELETE.
        'id': 'INT GENERATED ALWAYS AS IDENTITY',  # Teradata Specific
        'reference': 'INT',
        'list:integer': 'CLOB',
        'list:string': 'CLOB',
        'list:reference': 'CLOB',
        'big-id': 'BIGINT GENERATED ALWAYS AS IDENTITY',  # Teradata Specific
        'big-reference': 'BIGINT',
        'reference FK': ' REFERENCES %(foreign_key)s',
        'reference TFK': ' FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s)',
        }

    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        self.db = db
        self.dbengine = "teradata"
        self.uri = uri
        if do_connect: self.find_driver(adapter_args,uri)
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.find_or_make_work_folder()
        # Everything after the scheme is the raw ODBC connection string.
        ruri = uri.split('://', 1)[1]
        def connector(cnxn=ruri,driver_args=driver_args):
            return self.driver.connect(cnxn,**driver_args)
        self.connector = connector
        if do_connect: self.reconnect()

    def LEFT_JOIN(self):
        return 'LEFT OUTER JOIN'

    # Similar to MSSQL, Teradata can't specify a range (for Pageby)
    def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
        # Only the upper bound is rendered (TOP lmax); no OFFSET support here.
        if limitby:
            (lmin, lmax) = limitby
            sql_s += ' TOP %i' % lmax
        return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)

    def _truncate(self, table, mode=''):
        """Teradata uses DELETE ... ALL instead of TRUNCATE; mode is ignored."""
        tablename = table._tablename
        return ['DELETE FROM %s ALL;' % (tablename)]
# Placeholder sequence name substituted into DDL; replaced per-table in
# create_sequence_and_triggers() below.
INGRES_SEQNAME='ii***lineitemsequence' # NOTE invalid database object name
                                       # (ANSI-SQL wants this form of name
                                       # to be a delimited identifier)

class IngresAdapter(BaseAdapter):
    """DAL adapter for Ingres via pyodbc.

    The URI is either a full ODBC connection string (detected by the
    presence of '=') or just a local database name, in which case an
    ODBC string with OS authentication against the (local) vnode is built.
    """
    drivers = ('pyodbc',)

    # Mapping of DAL field types to Ingres column type declarations.
    types = {
        'boolean': 'CHAR(1)',
        'string': 'VARCHAR(%(length)s)',
        'text': 'CLOB',
        'json': 'CLOB',
        'password': 'VARCHAR(%(length)s)',  ## Not sure what this contains utf8 or nvarchar. Or even bytes?
        'blob': 'BLOB',
        'upload': 'VARCHAR(%(length)s)',  ## FIXME utf8 or nvarchar... or blob? what is this type?
        'integer': 'INTEGER4', # or int8...
        'bigint': 'BIGINT',
        'float': 'FLOAT',
        'double': 'FLOAT8',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'ANSIDATE',
        'time': 'TIME WITHOUT TIME ZONE',
        'datetime': 'TIMESTAMP WITHOUT TIME ZONE',
        'id': 'int not null unique with default next value for %s' % INGRES_SEQNAME,
        'reference': 'INT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'CLOB',
        'list:string': 'CLOB',
        'list:reference': 'CLOB',
        'big-id': 'bigint not null unique with default next value for %s' % INGRES_SEQNAME,
        'big-reference': 'BIGINT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s', ## FIXME TODO
        }

    def LEFT_JOIN(self):
        return 'LEFT OUTER JOIN'

    def RANDOM(self):
        return 'RANDOM()'

    def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
        """Render a SELECT, emulating limitby with FIRST/OFFSET."""
        if limitby:
            (lmin, lmax) = limitby
            fetch_amt = lmax - lmin
            if fetch_amt:
                sql_s += ' FIRST %d ' % (fetch_amt, )
            if lmin:
                # Requires Ingres 9.2+
                sql_o += ' OFFSET %d' % (lmin, )
        return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)

    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        self.db = db
        self.dbengine = "ingres"
        self._driver = pyodbc
        self.uri = uri
        if do_connect: self.find_driver(adapter_args,uri)
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.find_or_make_work_folder()
        connstr = uri.split(':', 1)[1]
        # Simple URI processing
        connstr = connstr.lstrip()
        while connstr.startswith('/'):
            connstr = connstr[1:]
        if '=' in connstr:
            # Assume we have a regular ODBC connection string and just use it
            ruri = connstr
        else:
            # Assume only (local) dbname is passed in with OS auth
            database_name = connstr
            default_driver_name = 'Ingres'
            vnode = '(local)'
            servertype = 'ingres'
            ruri = 'Driver={%s};Server=%s;Database=%s' % (default_driver_name, vnode, database_name)
        def connector(cnxn=ruri,driver_args=driver_args):
            return self.driver.connect(cnxn,**driver_args)

        self.connector = connector

        # TODO if version is >= 10, set types['id'] to Identity column, see http://community.actian.com/wiki/Using_Ingres_Identity_Columns
        if do_connect: self.reconnect()

    def create_sequence_and_triggers(self, query, table, **args):
        """Run the CREATE TABLE plus the per-table sequence/index setup.

        Keyed tables only get the btree modify; others get a real
        sequence substituted for the INGRES_SEQNAME placeholder.
        """
        # post create table auto inc code (if needed)
        # modify table to btree for performance....
        # Older Ingres releases could use rule/trigger like Oracle above.
        if hasattr(table,'_primarykey'):
            modify_tbl_sql = 'modify %s to btree unique on %s' % \
                (table._tablename,
                 ', '.join(["'%s'" % x for x in table.primarykey]))
            self.execute(modify_tbl_sql)
        else:
            tmp_seqname='%s_iisq' % table._tablename
            query=query.replace(INGRES_SEQNAME, tmp_seqname)
            self.execute('create sequence %s' % tmp_seqname)
            self.execute(query)
            self.execute('modify %s to btree unique on %s' % (table._tablename, 'id'))

    def lastrowid(self,table):
        """Return the current value of the table's id sequence."""
        tmp_seqname='%s_iisq' % table
        self.execute('select current value for %s' % tmp_seqname)
        return long(self.cursor.fetchone()[0]) # don't really need int type cast here...
4110
class IngresUnicodeAdapter(IngresAdapter):
    """Ingres adapter using Unicode column types (NVARCHAR/NCLOB).

    Identical to IngresAdapter except for the type map below.
    """

    drivers = ('pyodbc',)

    # Same mapping as IngresAdapter but with national (Unicode) character types.
    types = {
        'boolean': 'CHAR(1)',
        'string': 'NVARCHAR(%(length)s)',
        'text': 'NCLOB',
        'json': 'NCLOB',
        'password': 'NVARCHAR(%(length)s)',  ## Not sure what this contains utf8 or nvarchar. Or even bytes?
        'blob': 'BLOB',
        'upload': 'VARCHAR(%(length)s)',  ## FIXME utf8 or nvarchar... or blob? what is this type?
        'integer': 'INTEGER4', # or int8...
        'bigint': 'BIGINT',
        'float': 'FLOAT',
        'double': 'FLOAT8',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'ANSIDATE',
        'time': 'TIME WITHOUT TIME ZONE',
        'datetime': 'TIMESTAMP WITHOUT TIME ZONE',
        'id': 'INTEGER4 not null unique with default next value for %s'% INGRES_SEQNAME,
        'reference': 'INTEGER4, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'NCLOB',
        'list:string': 'NCLOB',
        'list:reference': 'NCLOB',
        'big-id': 'BIGINT not null unique with default next value for %s'% INGRES_SEQNAME,
        'big-reference': 'BIGINT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s', ## FIXME TODO
        }
4142
class SAPDBAdapter(BaseAdapter):
    """DAL adapter for SAP DB / MaxDB (experimental) via the sapdb driver.

    URI form: sapdb://user:password@host/db
    """
    drivers = ('sapdb',)

    support_distributed_transaction = False
    # Mapping of DAL field types to SAP DB column type declarations.
    types = {
        'boolean': 'CHAR(1)',
        'string': 'VARCHAR(%(length)s)',
        'text': 'LONG',
        'json': 'LONG',
        'password': 'VARCHAR(%(length)s)',
        'blob': 'LONG',
        'upload': 'VARCHAR(%(length)s)',
        'integer': 'INT',
        'bigint': 'BIGINT',
        'float': 'FLOAT',
        'double': 'DOUBLE PRECISION',
        'decimal': 'FIXED(%(precision)s,%(scale)s)',
        'date': 'DATE',
        'time': 'TIME',
        'datetime': 'TIMESTAMP',
        'id': 'INT PRIMARY KEY',
        'reference': 'INT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'LONG',
        'list:string': 'LONG',
        'list:reference': 'LONG',
        'big-id': 'BIGINT PRIMARY KEY',
        'big-reference': 'BIGINT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        }

    def sequence_name(self,table):
        """Name of the per-table id sequence used by create_sequence_and_triggers."""
        return '%s_id_Seq' % table

    def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
        # Emulates limitby with a nested ROWNO subquery.
        # NOTE(review): the inner 'WHERE ROWNO=%i' with lmax and the leading
        # '%s %s FROM' (no SELECT keyword ahead of sql_s) look suspicious --
        # confirm against a live SAP DB before relying on pagination here.
        if limitby:
            (lmin, lmax) = limitby
            if len(sql_w) > 1:
                sql_w_row = sql_w + ' AND w_row > %i' % lmin
            else:
                sql_w_row = 'WHERE w_row > %i' % lmin
            return '%s %s FROM (SELECT w_tmp.*, ROWNO w_row FROM (SELECT %s FROM %s%s%s) w_tmp WHERE ROWNO=%i) %s %s %s;' % (sql_s, sql_f, sql_f, sql_t, sql_w, sql_o, lmax, sql_t, sql_w_row, sql_o)
        return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)

    def create_sequence_and_triggers(self, query, table, **args):
        """Create the id sequence, wire it as the column default, then run query."""
        # following lines should only be executed if table._sequence_name does not exist
        self.execute('CREATE SEQUENCE %s;' % table._sequence_name)
        self.execute("ALTER TABLE %s ALTER COLUMN %s SET DEFAULT NEXTVAL('%s');" \
            % (table._tablename, table._id.name, table._sequence_name))
        self.execute(query)

    REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:@]+)(\:(?P<port>[0-9]+))?/(?P<db>[^\?]+)(\?sslmode=(?P<sslmode>.+))?$')

    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        self.db = db
        self.dbengine = "sapdb"
        self.uri = uri
        if do_connect: self.find_driver(adapter_args,uri)
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.find_or_make_work_folder()
        ruri = uri.split('://',1)[1]
        m = self.REGEX_URI.match(ruri)
        if not m:
            raise SyntaxError("Invalid URI string in DAL")
        user = credential_decoder(m.group('user'))
        if not user:
            raise SyntaxError('User required')
        password = credential_decoder(m.group('password'))
        if not password:
            password = ''
        host = m.group('host')
        if not host:
            raise SyntaxError('Host name required')
        db = m.group('db')
        if not db:
            raise SyntaxError('Database name required')
        def connector(user=user, password=password, database=db,
                      host=host, driver_args=driver_args):
            return self.driver.Connection(user, password, database,
                                          host, **driver_args)
        self.connector = connector
        if do_connect: self.reconnect()

    def lastrowid(self,table):
        """Return the sequence value used for the last insert.

        NOTE(review): NEXTVAL advances the sequence rather than reading the
        current value -- verify this matches sapdb driver behavior.
        """
        self.execute("select %s.NEXTVAL from dual" % table._sequence_name)
        return long(self.cursor.fetchone()[0])
4233
class CubridAdapter(MySQLAdapter):
    """DAL adapter for CUBRID; reuses the MySQL SQL dialect.

    URI form: cubrid://user:password@host[:port]/db[?set_encoding=charset]
    (port defaults to 30000).
    """
    drivers = ('cubriddb',)

    REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>[^?]+)(\?set_encoding=(?P<charset>\w+))?$')

    def __init__(self, db, uri, pool_size=0, folder=None, db_codec='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        self.db = db
        self.dbengine = "cubrid"
        self.uri = uri
        if do_connect: self.find_driver(adapter_args,uri)
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.find_or_make_work_folder()
        ruri = uri.split('://',1)[1]
        m = self.REGEX_URI.match(ruri)
        if not m:
            raise SyntaxError(
                "Invalid URI string in DAL: %s" % self.uri)
        # FIX: decode each credential exactly once.  The previous code
        # decoded user twice and computed a decoded 'passwd' that it then
        # ignored, binding the wrong variable into the connector closure.
        user = credential_decoder(m.group('user'))
        if not user:
            raise SyntaxError('User required')
        passwd = credential_decoder(m.group('password'))
        if not passwd:
            passwd = ''
        host = m.group('host')
        if not host:
            raise SyntaxError('Host name required')
        db = m.group('db')
        if not db:
            raise SyntaxError('Database name required')
        port = int(m.group('port') or '30000')
        # Parsed for completeness; the cubriddb connect() call below takes
        # no charset argument, so the value is currently unused.
        charset = m.group('charset') or 'utf8'
        def connector(host=host, port=port, db=db,
                      user=user, passwd=passwd, driver_args=driver_args):
            return self.driver.connect(host,port,db,user,passwd,**driver_args)
        self.connector = connector
        if do_connect: self.reconnect()

    def after_connection(self):
        """Apply the same session setup MySQLAdapter uses."""
        self.execute('SET FOREIGN_KEY_CHECKS=1;')
        self.execute("SET sql_mode='NO_BACKSLASH_ESCAPES';")
4281
######## GAE MySQL ##########

class DatabaseStoredFile:
    """File-like object whose content lives in a 'web2py_filesystem' table.

    Used to keep migration metadata (.table files) inside the database on
    platforms without a writable filesystem (e.g. Google App Engine).
    Content is read fully into memory on open and written back on close.

    NOTE(review): paths/content are interpolated directly into SQL below;
    filenames come from the framework itself, but parameterized queries
    would still be safer.
    """

    # Class-level flag: set once the web2py_filesystem table is known to exist.
    web2py_filesystem = False

    def escape(self,obj):
        # Delegate SQL escaping to the active adapter.
        return self.db._adapter.escape(obj)

    def __init__(self,db,filename,mode):
        """Open `filename` backed by `db`; mode is 'r', 'w', 'rw' or 'a'."""
        if not db._adapter.dbengine in ('mysql', 'postgres', 'sqlite'):
            raise RuntimeError("only MySQL/Postgres/SQLite can store metadata .table files in database for now")
        self.db = db
        self.filename = filename
        self.mode = mode
        # Lazily create the backing table the first time any instance is made.
        if not self.web2py_filesystem:
            if db._adapter.dbengine == 'mysql':
                sql = "CREATE TABLE IF NOT EXISTS web2py_filesystem (path VARCHAR(255), content LONGTEXT, PRIMARY KEY(path) ) ENGINE=InnoDB;"
            elif db._adapter.dbengine in ('postgres', 'sqlite'):
                sql = "CREATE TABLE IF NOT EXISTS web2py_filesystem (path VARCHAR(255), content TEXT, PRIMARY KEY(path));"
            self.db.executesql(sql)
            DatabaseStoredFile.web2py_filesystem = True
        self.p=0          # current read position within self.data
        self.data = ''    # in-memory buffer for the whole file content
        if mode in ('r','rw','a'):
            query = "SELECT content FROM web2py_filesystem WHERE path='%s'" \
                % filename
            rows = self.db.executesql(query)
            if rows:
                self.data = rows[0][0]
            elif exists(filename):
                # Fall back to a real file on disk if present.
                datafile = open(filename, 'r')
                try:
                    self.data = datafile.read()
                finally:
                    datafile.close()
            elif mode in ('r','rw'):
                # 'a' (append) tolerates a missing file; read modes do not.
                raise RuntimeError("File %s does not exist" % filename)

    def read(self, bytes):
        """Read up to `bytes` characters from the current position."""
        data = self.data[self.p:self.p+bytes]
        self.p += len(data)
        return data

    def readline(self):
        """Read up to and including the next newline (or to end of data)."""
        i = self.data.find('\n',self.p)+1
        if i>0:
            data, self.p = self.data[self.p:i], i
        else:
            data, self.p = self.data[self.p:], len(self.data)
        return data

    def write(self,data):
        # Appends only; content is flushed to the DB in close_connection().
        self.data += data

    def close_connection(self):
        """Flush the buffer back to web2py_filesystem and commit (idempotent)."""
        if self.db is not None:
            self.db.executesql(
                "DELETE FROM web2py_filesystem WHERE path='%s'" % self.filename)
            query = "INSERT INTO web2py_filesystem(path,content) VALUES ('%s','%s')"\
                % (self.filename, self.data.replace("'","''"))
            self.db.executesql(query)
            self.db.commit()
            self.db = None

    def close(self):
        self.close_connection()

    @staticmethod
    def exists(db, filename):
        """True if `filename` exists on disk or in web2py_filesystem."""
        if exists(filename):
            return True
        query = "SELECT path FROM web2py_filesystem WHERE path='%s'" % filename
        try:
            if db.executesql(query):
                return True
        except Exception, e:
            # Operational/programming errors are treated as "table missing",
            # logged, and reported as not-found; anything else propagates.
            if not (db._adapter.isOperationalError(e) or
                    db._adapter.isProgrammingError(e)):
                raise
            # no web2py_filesystem found?
            tb = traceback.format_exc()
            LOGGER.error("Could not retrieve %s\n%s" % (filename, tb))
        return False
4367
class UseDatabaseStoredFile:
    """Mixin that redirects an adapter's metadata-file I/O into the database.

    Adapters that inherit this (e.g. GoogleSQLAdapter) keep their .table
    migration files in the ``web2py_filesystem`` table via
    DatabaseStoredFile instead of on disk.  Requires the host class to
    provide ``self.db``.
    """

    def file_exists(self, filename):
        """True if the file exists on disk or in web2py_filesystem."""
        return DatabaseStoredFile.exists(self.db, filename)

    def file_open(self, filename, mode='rb', lock=True):
        """Return a database-backed file object (``lock`` is accepted but unused)."""
        return DatabaseStoredFile(self.db, filename, mode)

    def file_close(self, fileobj):
        """Flush the file's buffered content back into the database."""
        fileobj.close_connection()

    def file_delete(self, filename):
        """Delete the file's row from web2py_filesystem and commit."""
        self.db.executesql(
            "DELETE FROM web2py_filesystem WHERE path='%s'" % filename)
        self.db.commit()
4384
class GoogleSQLAdapter(UseDatabaseStoredFile,MySQLAdapter):
    """Adapter for Google Cloud SQL (MySQL dialect) on App Engine.

    Connects through the GAE ``rdbms`` API rather than a DB-API driver,
    and stores migration metadata in the database (UseDatabaseStoredFile)
    since GAE has no writable filesystem.
    """
    uploads_in_blob = True

    # URI tail is 'instance/database'.
    REGEX_URI = re.compile('^(?P<instance>.*)/(?P<db>.*)$')

    def __init__(self, db, uri='google:sql://realm:domain/database',
                 pool_size=0, folder=None, db_codec='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):

        self.db = db
        self.dbengine = "mysql"
        self.uri = uri
        self.pool_size = pool_size
        self.db_codec = db_codec
        self._after_connection = after_connection
        if do_connect: self.find_driver(adapter_args, uri)
        # Rebase the working folder under $HOME for the GAE sandbox.
        self.folder = folder or pjoin('$HOME',THREAD_LOCAL.folder.split(
                os.sep+'applications'+os.sep,1)[1])
        ruri = uri.split("://")[1]
        m = self.REGEX_URI.match(ruri)
        if not m:
            raise SyntaxError("Invalid URI string in SQLDB: %s" % self.uri)
        instance = credential_decoder(m.group('instance'))
        self.dbstring = db = credential_decoder(m.group('db'))
        driver_args['instance'] = instance
        if not 'charset' in driver_args:
            driver_args['charset'] = 'utf8'
        # createdb=True (default): connect without a database and create it
        # in after_connection(); otherwise connect directly to `db`.
        self.createdb = createdb = adapter_args.get('createdb',True)
        if not createdb:
            driver_args['database'] = db
        def connector(driver_args=driver_args):
            # rdbms is the GAE Cloud SQL client module (set up at import time).
            return rdbms.connect(**driver_args)
        self.connector = connector
        if do_connect: self.reconnect()

    def after_connection(self):
        """Optionally create/select the database, then set MySQL session modes."""
        if self.createdb:
            # self.execute('DROP DATABASE %s' % self.dbstring)
            self.execute('CREATE DATABASE IF NOT EXISTS %s' % self.dbstring)
            self.execute('USE %s' % self.dbstring)
        self.execute("SET FOREIGN_KEY_CHECKS=1;")
        self.execute("SET sql_mode='NO_BACKSLASH_ESCAPES';")

    def execute(self, command, *a, **b):
        # The rdbms API expects unicode statements.
        return self.log_execute(command.decode('utf8'), *a, **b)

    def find_driver(self,adapter_args,uri=None):
        """No DB-API driver lookup: connection goes through GAE's rdbms module."""
        self.adapter_args = adapter_args
        self.driver = "google"
4435
class NoSQLAdapter(BaseAdapter):
    """Common base for non-relational backends (GAE datastore, CouchDB, MongoDB).

    Overrides value representation for native Python objects instead of SQL
    literals, stubs out transactions, and raises on every SQL-only
    operation (joins, DDL helpers, etc.).
    """
    can_select_for_update = False

    @staticmethod
    def to_unicode(obj):
        """Coerce any object to unicode (Python 2 semantics: str is decoded as utf8)."""
        if isinstance(obj, str):
            return obj.decode('utf8')
        elif not isinstance(obj, unicode):
            return unicode(obj)
        return obj

    def id_query(self, table):
        """Query matching all records of `table` (id > 0)."""
        return table._id > 0

    def represent(self, obj, fieldtype):
        """Convert `obj` to the native Python value stored for `fieldtype`.

        Unlike the SQL adapters this returns Python objects (long, float,
        datetime, unicode, lists), not SQL literal strings.  Empty strings
        become None except for string-like field types.
        """
        field_is_type = fieldtype.startswith
        if isinstance(obj, CALLABLETYPES):
            obj = obj()
        if isinstance(fieldtype, SQLCustomType):
            return fieldtype.encoder(obj)
        if isinstance(obj, (Expression, Field)):
            raise SyntaxError("non supported on GAE")
        if self.dbengine == 'google:datastore':
            # Values already wrapped in a GAE property pass through untouched.
            if isinstance(fieldtype, gae.Property):
                return obj
        is_string = isinstance(fieldtype,str)
        is_list = is_string and field_is_type('list:')
        if is_list:
            if not obj:
                obj = []
            if not isinstance(obj, (list, tuple)):
                obj = [obj]
        if obj == '' and not \
                (is_string and fieldtype[:2] in ['st','te', 'pa','up']):
            return None
        if not obj is None:
            if isinstance(obj, list) and not is_list:
                # A plain list for a scalar type: represent each element.
                obj = [self.represent(o, fieldtype) for o in obj]
            elif fieldtype in ('integer','bigint','id'):
                obj = long(obj)
            elif fieldtype == 'double':
                obj = float(obj)
            elif is_string and field_is_type('reference'):
                if isinstance(obj, (Row, Reference)):
                    obj = obj['id']
                obj = long(obj)
            elif fieldtype == 'boolean':
                # Truthy values not starting with '0' or 'F' count as True.
                if obj and not str(obj)[0].upper() in '0F':
                    obj = True
                else:
                    obj = False
            elif fieldtype == 'date':
                if not isinstance(obj, datetime.date):
                    # Parse 'YYYY-MM-DD' strings.
                    (y, m, d) = map(int,str(obj).strip().split('-'))
                    obj = datetime.date(y, m, d)
                elif isinstance(obj,datetime.datetime):
                    # Downgrade datetime to date.
                    (y, m, d) = (obj.year, obj.month, obj.day)
                    obj = datetime.date(y, m, d)
            elif fieldtype == 'time':
                if not isinstance(obj, datetime.time):
                    # Parse 'HH:MM[:SS]' strings; missing seconds default to 0.
                    time_items = map(int,str(obj).strip().split(':')[:3])
                    if len(time_items) == 3:
                        (h, mi, s) = time_items
                    else:
                        (h, mi, s) = time_items + [0]
                    obj = datetime.time(h, mi, s)
            elif fieldtype == 'datetime':
                if not isinstance(obj, datetime.datetime):
                    # Parse 'YYYY-MM-DD[ HH:MM[:SS]]' strings.
                    (y, m, d) = map(int,str(obj)[:10].strip().split('-'))
                    time_items = map(int,str(obj)[11:].strip().split(':')[:3])
                    while len(time_items)<3:
                        time_items.append(0)
                    (h, mi, s) = time_items
                    obj = datetime.datetime(y, m, d, h, mi, s)
            elif fieldtype == 'blob':
                pass
            elif fieldtype == 'json':
                # Stored decoded: parse JSON text into Python objects.
                if isinstance(obj, basestring):
                    obj = self.to_unicode(obj)
                if have_serializers:
                    obj = serializers.loads_json(obj)
                elif simplejson:
                    obj = simplejson.loads(obj)
                else:
                    raise RuntimeError("missing simplejson")
            elif is_string and field_is_type('list:string'):
                return map(self.to_unicode,obj)
            elif is_list:
                return map(int,obj)
            else:
                obj = self.to_unicode(obj)
        return obj

    # The _insert/_count/_select/_delete/_update methods below return
    # human-readable pseudo-SQL strings (there is no real SQL on these
    # backends); they are what shows up in db._lastsql.
    def _insert(self,table,fields):
        return 'insert %s in %s' % (fields, table)

    def _count(self,query,distinct=None):
        return 'count %s' % repr(query)

    def _select(self,query,fields,attributes):
        return 'select %s where %s' % (repr(fields), repr(query))

    def _delete(self,tablename, query):
        return 'delete %s where %s' % (repr(tablename),repr(query))

    def _update(self,tablename,query,fields):
        return 'update %s (%s) where %s' % (repr(tablename),
                                            repr(fields),repr(query))

    def commit(self):
        """
        remember: no transactions on many NoSQL
        """
        pass

    def rollback(self):
        """
        remember: no transactions on many NoSQL
        """
        pass

    def close_connection(self):
        """
        remember: no transactions on many NoSQL
        """
        pass


    # these functions should never be called!
    def OR(self,first,second): raise SyntaxError("Not supported")
    def AND(self,first,second): raise SyntaxError("Not supported")
    def AS(self,first,second): raise SyntaxError("Not supported")
    def ON(self,first,second): raise SyntaxError("Not supported")
    def STARTSWITH(self,first,second=None): raise SyntaxError("Not supported")
    def ENDSWITH(self,first,second=None): raise SyntaxError("Not supported")
    def ADD(self,first,second): raise SyntaxError("Not supported")
    def SUB(self,first,second): raise SyntaxError("Not supported")
    def MUL(self,first,second): raise SyntaxError("Not supported")
    def DIV(self,first,second): raise SyntaxError("Not supported")
    def LOWER(self,first): raise SyntaxError("Not supported")
    def UPPER(self,first): raise SyntaxError("Not supported")
    def EXTRACT(self,first,what): raise SyntaxError("Not supported")
    def LENGTH(self, first): raise SyntaxError("Not supported")
    def AGGREGATE(self,first,what): raise SyntaxError("Not supported")
    def LEFT_JOIN(self): raise SyntaxError("Not supported")
    def RANDOM(self): raise SyntaxError("Not supported")
    def SUBSTRING(self,field,parameters): raise SyntaxError("Not supported")
    def PRIMARY_KEY(self,key): raise SyntaxError("Not supported")
    def ILIKE(self,first,second): raise SyntaxError("Not supported")
    def drop(self,table,mode): raise SyntaxError("Not supported")
    def alias(self,table,alias): raise SyntaxError("Not supported")
    def migrate_table(self,*a,**b): raise SyntaxError("Not supported")
    def distributed_transaction_begin(self,key): raise SyntaxError("Not supported")
    def prepare(self,key): raise SyntaxError("Not supported")
    def commit_prepared(self,key): raise SyntaxError("Not supported")
    def rollback_prepared(self,key): raise SyntaxError("Not supported")
    def concat_add(self,table): raise SyntaxError("Not supported")
    def constraint_name(self, table, fieldname): raise SyntaxError("Not supported")
    def create_sequence_and_triggers(self, query, table, **args): pass
    def log_execute(self,*a,**b): raise SyntaxError("Not supported")
    def execute(self,*a,**b): raise SyntaxError("Not supported")
    def represent_exceptions(self, obj, fieldtype): raise SyntaxError("Not supported")
    def lastrowid(self,table): raise SyntaxError("Not supported")
    def rowslice(self,rows,minimum=0,maximum=None): raise SyntaxError("Not supported")
4600
class GAEF(object):
    """One Google App Engine datastore filter.

    Bundles the datastore property name, the comparison operator string,
    the comparison value, and `apply`, a two-argument callable used to
    re-check the filter in Python when records are filtered client-side.
    """
    def __init__(self, name, op, value, apply):
        # web2py exposes the primary key as 'id'; the datastore calls
        # it '__key__'
        if name == 'id':
            name = '__key__'
        self.name = name
        self.op = op
        self.value = value
        self.apply = apply

    def __repr__(self):
        return '(%s %s %s:%s)' % (
            self.name, self.op, repr(self.value), type(self.value))
class GoogleDatastoreAdapter(NoSQLAdapter):
    """DAL adapter for the Google App Engine datastore (google:datastore)."""

    # upload fields are stored inline as blobs (no filesystem on GAE)
    uploads_in_blob = True
    # filled per-instance in __init__ with gae.Property factories
    types = {}

    # no filesystem access on GAE: migration file helpers are no-ops
    def file_exists(self, filename): pass
    def file_open(self, filename, mode='rb', lock=True): pass
    def file_close(self, fileobj): pass

    # extracts an optional datastore namespace from the connection uri,
    # e.g. 'google:datastore://mynamespace'
    REGEX_NAMESPACE = re.compile('.*://(?P<namespace>.+)')
    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        """Initialize the adapter: map DAL types to gae.Property factories
        and select the datastore namespace encoded in the uri (if any).

        Note: there is no connection to open; pool_size is forced to 0.
        """
        # lambdas are used where extra keyword defaults must be injected
        self.types.update({
                'boolean': gae.BooleanProperty,
                'string': (lambda **kwargs: gae.StringProperty(multiline=True, **kwargs)),
                'text': gae.TextProperty,
                'json': gae.TextProperty,
                'password': gae.StringProperty,
                'blob': gae.BlobProperty,
                'upload': gae.StringProperty,
                'integer': gae.IntegerProperty,
                'bigint': gae.IntegerProperty,
                'float': gae.FloatProperty,
                'double': gae.FloatProperty,
                'decimal': GAEDecimalProperty,
                'date': gae.DateProperty,
                'time': gae.TimeProperty,
                'datetime': gae.DateTimeProperty,
                # 'id' is handled by the datastore key itself
                'id': None,
                # references are stored as the referenced record's integer id
                'reference': gae.IntegerProperty,
                'list:string': (lambda **kwargs: gae.StringListProperty(default=None, **kwargs)),
                'list:integer': (lambda **kwargs: gae.ListProperty(int,default=None, **kwargs)),
                'list:reference': (lambda **kwargs: gae.ListProperty(int,default=None, **kwargs)),
                })
        self.db = db
        self.uri = uri
        self.dbengine = 'google:datastore'
        self.folder = folder
        db['_lastsql'] = ''
        self.db_codec = 'UTF-8'
        self._after_connection = after_connection
        # no connection pooling on GAE
        self.pool_size = 0
        match = self.REGEX_NAMESPACE.match(uri)
        if match:
            namespace_manager.set_namespace(match.group('namespace'))
    def parse_id(self, value, field_type):
        # Datastore record ids arrive as plain integers already, so no
        # conversion is needed (contrast with MongoDBAdapter.parse_id,
        # which must translate ObjectIds).
        return value
    def create_table(self,table,migrate=True,fake_migrate=False, polymodel=None):
        """Build a gae.Model subclass for *table* and store it as
        table._tableobj.

        polymodel: None for a plain gae.Model, True for a PolyModel root,
        or a parent Table whose _tableobj becomes the base class (fields
        already defined on the parent are skipped).
        """
        myfields = {}
        for field in table:
            # inherited fields live on the parent polymodel class
            if isinstance(polymodel,Table) and field.name in polymodel.fields():
                continue
            attr = {}
            if isinstance(field.custom_qualifier, dict):
                #this is custom properties to add to the GAE field declartion
                attr = field.custom_qualifier
            field_type = field.type
            if isinstance(field_type, SQLCustomType):
                ftype = self.types[field_type.native or field_type.type](**attr)
            elif isinstance(field_type, gae.Property):
                # caller supplied a ready-made GAE property
                ftype = field_type
            elif field_type.startswith('id'):
                # the id is the datastore key; no property is created
                continue
            elif field_type.startswith('decimal'):
                # field_type looks like 'decimal(precision,scale)'
                precision, scale = field_type[7:].strip('()').split(',')
                precision = int(precision)
                scale = int(scale)
                ftype = GAEDecimalProperty(precision, scale, **attr)
            elif field_type.startswith('reference'):
                if field.notnull:
                    attr = dict(required=True)
                referenced = field_type[10:].strip()
                ftype = self.types[field_type[:9]](referenced, **attr)
            elif field_type.startswith('list:reference'):
                if field.notnull:
                    attr['required'] = True
                referenced = field_type[15:].strip()
                ftype = self.types[field_type[:14]](**attr)
            elif field_type.startswith('list:'):
                ftype = self.types[field_type](**attr)
            elif not field_type in self.types\
                 or not self.types[field_type]:
                raise SyntaxError('Field: unknown field type: %s' % field_type)
            else:
                ftype = self.types[field_type](**attr)
            myfields[field.name] = ftype
        # pick the base class according to the polymodel argument
        if not polymodel:
            table._tableobj = classobj(table._tablename, (gae.Model, ), myfields)
        elif polymodel==True:
            table._tableobj = classobj(table._tablename, (PolyModel, ), myfields)
        elif isinstance(polymodel,Table):
            table._tableobj = classobj(table._tablename, (polymodel._tableobj, ), myfields)
        else:
            raise SyntaxError("polymodel must be None, True, a table or a tablename")
        return None
    def expand(self,expression,field_type=None):
        """Recursively expand a DAL expression.

        For a Query/Expression tree this dispatches to the operator
        methods (AND, EQ, ...), which ultimately return lists of GAEF
        filters; Fields expand to their property name and bare values
        to their datastore representation.
        """
        if isinstance(expression,Field):
            # GAE cannot build indexes over these types, so they cannot
            # appear in filters/orderings
            if expression.type in ('text', 'blob', 'json'):
                raise SyntaxError('AppEngine does not index by: %s' % expression.type)
            return expression.name
        elif isinstance(expression, (Expression, Query)):
            if not expression.second is None:
                return expression.op(expression.first, expression.second)
            elif not expression.first is None:
                return expression.op(expression.first)
            else:
                return expression.op()
        elif field_type:
            return self.represent(expression,field_type)
        elif isinstance(expression,(list,tuple)):
            return ','.join([self.represent(item,field_type) for item in expression])
        else:
            return str(expression)

    ### TODO from gql.py Expression
4730 - def AND(self,first,second):
4731 a = self.expand(first) 4732 b = self.expand(second) 4733 if b[0].name=='__key__' and a[0].name!='__key__': 4734 return b+a 4735 return a+b
4736
4737 - def EQ(self,first,second=None):
4738 if isinstance(second, Key): 4739 return [GAEF(first.name,'=',second,lambda a,b:a==b)] 4740 return [GAEF(first.name,'=',self.represent(second,first.type),lambda a,b:a==b)]
4741
4742 - def NE(self,first,second=None):
4743 if first.type != 'id': 4744 return [GAEF(first.name,'!=',self.represent(second,first.type),lambda a,b:a!=b)] 4745 else: 4746 if not second is None: 4747 second = Key.from_path(first._tablename, long(second)) 4748 return [GAEF(first.name,'!=',second,lambda a,b:a!=b)]
4749
4750 - def LT(self,first,second=None):
4751 if first.type != 'id': 4752 return [GAEF(first.name,'<',self.represent(second,first.type),lambda a,b:a<b)] 4753 else: 4754 second = Key.from_path(first._tablename, long(second)) 4755 return [GAEF(first.name,'<',second,lambda a,b:a<b)]
4756
4757 - def LE(self,first,second=None):
4758 if first.type != 'id': 4759 return [GAEF(first.name,'<=',self.represent(second,first.type),lambda a,b:a<=b)] 4760 else: 4761 second = Key.from_path(first._tablename, long(second)) 4762 return [GAEF(first.name,'<=',second,lambda a,b:a<=b)]
4763
4764 - def GT(self,first,second=None):
4765 if first.type != 'id' or second==0 or second == '0': 4766 return [GAEF(first.name,'>',self.represent(second,first.type),lambda a,b:a>b)] 4767 else: 4768 second = Key.from_path(first._tablename, long(second)) 4769 return [GAEF(first.name,'>',second,lambda a,b:a>b)]
4770
4771 - def GE(self,first,second=None):
4772 if first.type != 'id': 4773 return [GAEF(first.name,'>=',self.represent(second,first.type),lambda a,b:a>=b)] 4774 else: 4775 second = Key.from_path(first._tablename, long(second)) 4776 return [GAEF(first.name,'>=',second,lambda a,b:a>=b)]
4777
4778 - def INVERT(self,first):
4779 return '-%s' % first.name
4780
4781 - def COMMA(self,first,second):
4782 return '%s, %s' % (self.expand(first),self.expand(second))
4783
4784 - def BELONGS(self,first,second=None):
4785 if not isinstance(second,(list, tuple)): 4786 raise SyntaxError("Not supported") 4787 if first.type != 'id': 4788 return [GAEF(first.name,'in',self.represent(second,first.type),lambda a,b:a in b)] 4789 else: 4790 second = [Key.from_path(first._tablename, int(i)) for i in second] 4791 return [GAEF(first.name,'in',second,lambda a,b:a in b)]
4792
4793 - def CONTAINS(self,first,second,case_sensitive=False):
4794 # silently ignoring: GAE can only do case sensitive matches! 4795 if not first.type.startswith('list:'): 4796 raise SyntaxError("Not supported") 4797 return [GAEF(first.name,'=',self.expand(second,first.type[5:]),lambda a,b:b in a)]
4798
4799 - def NOT(self,first):
4800 nops = { self.EQ: self.NE, 4801 self.NE: self.EQ, 4802 self.LT: self.GE, 4803 self.GT: self.LE, 4804 self.LE: self.GT, 4805 self.GE: self.LT} 4806 if not isinstance(first,Query): 4807 raise SyntaxError("Not suported") 4808 nop = nops.get(first.op,None) 4809 if not nop: 4810 raise SyntaxError("Not suported %s" % first.op.__name__) 4811 first.op = nop 4812 return self.expand(first)
4813
    def truncate(self,table,mode):
        """Delete all records of *table* (mode is ignored on GAE)."""
        # reuse the generic delete machinery with an "all ids" query
        self.db(self.db._adapter.id_query(table)).delete()
4817 - def select_raw(self,query,fields=None,attributes=None):
4818 db = self.db 4819 fields = fields or [] 4820 attributes = attributes or {} 4821 args_get = attributes.get 4822 new_fields = [] 4823 for item in fields: 4824 if isinstance(item,SQLALL): 4825 new_fields += item._table 4826 else: 4827 new_fields.append(item) 4828 fields = new_fields 4829 if query: 4830 tablename = self.get_table(query) 4831 elif fields: 4832 tablename = fields[0].tablename 4833 query = db._adapter.id_query(fields[0].table) 4834 else: 4835 raise SyntaxError("Unable to determine a tablename") 4836 4837 if query: 4838 if use_common_filters(query): 4839 query = self.common_filter(query,[tablename]) 4840 4841 #tableobj is a GAE Model class (or subclass) 4842 tableobj = db[tablename]._tableobj 4843 filters = self.expand(query) 4844 4845 projection = None 4846 if len(db[tablename].fields) == len(fields): 4847 #getting all fields, not a projection query 4848 projection = None 4849 elif args_get('projection') == True: 4850 projection = [] 4851 for f in fields: 4852 if f.type in ['text', 'blob', 'json']: 4853 raise SyntaxError( 4854 "text and blob field types not allowed in projection queries") 4855 else: 4856 projection.append(f.name) 4857 elif args_get('filterfields') == True: 4858 projection = [] 4859 for f in fields: 4860 projection.append(f.name) 4861 4862 # real projection's can't include 'id'. 
4863 # it will be added to the result later 4864 query_projection = [ 4865 p for p in projection if \ 4866 p != db[tablename]._id.name] if projection and \ 4867 args_get('projection') == True\ 4868 else None 4869 4870 cursor = None 4871 if isinstance(args_get('reusecursor'), str): 4872 cursor = args_get('reusecursor') 4873 items = gae.Query(tableobj, projection=query_projection, 4874 cursor=cursor) 4875 4876 for filter in filters: 4877 if args_get('projection') == True and \ 4878 filter.name in query_projection and \ 4879 filter.op in ['=', '<=', '>=']: 4880 raise SyntaxError( 4881 "projection fields cannot have equality filters") 4882 if filter.name=='__key__' and filter.op=='>' and filter.value==0: 4883 continue 4884 elif filter.name=='__key__' and filter.op=='=': 4885 if filter.value==0: 4886 items = [] 4887 elif isinstance(filter.value, Key): 4888 # key qeuries return a class instance, 4889 # can't use projection 4890 # extra values will be ignored in post-processing later 4891 item = tableobj.get(filter.value) 4892 items = (item and [item]) or [] 4893 else: 4894 # key qeuries return a class instance, 4895 # can't use projection 4896 # extra values will be ignored in post-processing later 4897 item = tableobj.get_by_id(filter.value) 4898 items = (item and [item]) or [] 4899 elif isinstance(items,list): # i.e. there is a single record! 4900 items = [i for i in items if filter.apply( 4901 getattr(item,filter.name),filter.value)] 4902 else: 4903 if filter.name=='__key__' and filter.op != 'in': 4904 items.order('__key__') 4905 items = items.filter('%s %s' % (filter.name,filter.op), 4906 filter.value) 4907 if not isinstance(items,list): 4908 if args_get('left', None): 4909 raise SyntaxError('Set: no left join in appengine') 4910 if args_get('groupby', None): 4911 raise SyntaxError('Set: no groupby in appengine') 4912 orderby = args_get('orderby', False) 4913 if orderby: 4914 ### THIS REALLY NEEDS IMPROVEMENT !!! 
4915 if isinstance(orderby, (list, tuple)): 4916 orderby = xorify(orderby) 4917 if isinstance(orderby,Expression): 4918 orderby = self.expand(orderby) 4919 orders = orderby.split(', ') 4920 for order in orders: 4921 order={'-id':'-__key__','id':'__key__'}.get(order,order) 4922 items = items.order(order) 4923 if args_get('limitby', None): 4924 (lmin, lmax) = attributes['limitby'] 4925 (limit, offset) = (lmax - lmin, lmin) 4926 rows = items.fetch(limit,offset=offset) 4927 #cursor is only useful if there was a limit and we didn't return 4928 # all results 4929 if args_get('reusecursor'): 4930 db['_lastcursor'] = items.cursor() 4931 items = rows 4932 return (items, tablename, projection or db[tablename].fields)
4933
    def select(self,query,fields,attributes):
        """
        This is the GAE version of select. some notes to consider:
         - db['_lastsql'] is not set because there is no SQL statement string
           for a GAE query
         - 'nativeRef' is a magical fieldname used for self references on GAE
         - optional attribute 'projection' when set to True will trigger
           use of the GAE projection queries.  note that there are rules for
           what is accepted imposed by GAE: each field must be indexed,
           projection queries cannot contain blob or text fields, and you
           cannot use == and also select that same field.  see https://developers.google.com/appengine/docs/python/datastore/queries#Query_Projection
         - optional attribute 'filterfields' when set to True web2py will only
           parse the explicitly listed fields into the Rows object, even though
           all fields are returned in the query.  This can be used to reduce
           memory usage in cases where true projection queries are not
           usable.
         - optional attribute 'reusecursor' allows use of cursor with queries
           that have the limitby attribute.  Set the attribute to True for the
           first query, set it to the value of db['_lastcursor'] to continue
           a previous query.  The user must save the cursor value between
           requests, and the filters must be identical.  It is up to the user
           to follow google's limitations: https://developers.google.com/appengine/docs/python/datastore/queries#Query_Cursors
        """

        (items, tablename, fields) = self.select_raw(query,fields,attributes)
        # self.db['_lastsql'] = self._select(query,fields,attributes)
        # the id column and self-references yield the entity itself;
        # everything else is read off the entity by attribute name
        rows = [[(t==self.db[tablename]._id.name and item) or \
                 (t=='nativeRef' and item) or getattr(item, t) \
                     for t in fields] for item in items]
        colnames = ['%s.%s' % (tablename, t) for t in fields]
        processor = attributes.get('processor',self.parse)
        return processor(rows,fields,colnames,False)
4967 - def count(self,query,distinct=None,limit=None):
4968 if distinct: 4969 raise RuntimeError("COUNT DISTINCT not supported") 4970 (items, tablename, fields) = self.select_raw(query) 4971 # self.db['_lastsql'] = self._count(query) 4972 try: 4973 return len(items) 4974 except TypeError: 4975 return items.count(limit=limit)
4976
    def delete(self,tablename, query):
        """
        This function was changed on 2010-05-04 because according to
        http://code.google.com/p/googleappengine/issues/detail?id=3119
        GAE no longer supports deleting more than 1000 records.

        Returns the number of deleted records.
        """
        # self.db['_lastsql'] = self._delete(tablename,query)
        (items, tablename, fields) = self.select_raw(query)
        # items can be one item or a query
        if not isinstance(items,list):
            # use a keys_only query to ensure that this runs as a
            # datastore small operation; delete in batches of 1000
            leftitems = items.fetch(1000, keys_only=True)
            counter = 0
            while len(leftitems):
                counter += len(leftitems)
                gae.delete(leftitems)
                leftitems = items.fetch(1000, keys_only=True)
        else:
            counter = len(items)
            gae.delete(items)
        return counter
5000 - def update(self,tablename,query,update_fields):
5001 # self.db['_lastsql'] = self._update(tablename,query,update_fields) 5002 (items, tablename, fields) = self.select_raw(query) 5003 counter = 0 5004 for item in items: 5005 for field, value in update_fields: 5006 setattr(item, field.name, self.represent(value,field.type)) 5007 item.put() 5008 counter += 1 5009 LOGGER.info(str(counter)) 5010 return counter
5011
    def insert(self,table,fields):
        """Insert one record; returns a Reference to the new id.

        fields is a list of (Field, value) pairs.
        """
        dfields=dict((f.name,self.represent(v,f.type)) for f,v in fields)
        # table._db['_lastsql'] = self._insert(table,fields)
        tmp = table._tableobj(**dfields)
        tmp.put()
        # wrap the numeric id in a lazy Reference, keeping the GAE key
        # around for later use
        rid = Reference(tmp.key().id())
        (rid._table, rid._record, rid._gaekey) = (table, None, tmp.key())
        return rid
5021 - def bulk_insert(self,table,items):
5022 parsed_items = [] 5023 for item in items: 5024 dfields=dict((f.name,self.represent(v,f.type)) for f,v in item) 5025 parsed_items.append(table._tableobj(**dfields)) 5026 gae.put(parsed_items) 5027 return True
5028
def uuid2int(uuidv):
    """Map a UUID string to its 128-bit integer value."""
    parsed = uuid.UUID(uuidv)
    return parsed.int
def int2uuid(n):
    """Map a 128-bit integer back to its canonical UUID string."""
    value = uuid.UUID(int=n)
    return str(value)
class CouchDBAdapter(NoSQLAdapter):
    """DAL adapter for CouchDB (experimental).

    Each DAL table maps to one CouchDB database; queries are compiled
    into JavaScript map functions (see _select).
    """
    drivers = ('couchdb',)

    # upload fields are stored inline in the document
    uploads_in_blob = True
    # DAL type -> Python type used when parsing values back from CouchDB
    types = {
        'boolean': bool,
        'string': str,
        'text': str,
        'json': str,
        'password': str,
        'blob': str,
        'upload': str,
        'integer': long,
        'bigint': long,
        'float': float,
        'double': float,
        'date': datetime.date,
        'time': datetime.time,
        'datetime': datetime.datetime,
        'id': long,
        'reference': long,
        'list:string': list,
        'list:integer': list,
        'list:reference': list,
        }

    # no migration files for CouchDB: the file helpers are no-ops
    def file_exists(self, filename): pass
    def file_open(self, filename, mode='rb', lock=True): pass
    def file_close(self, fileobj): pass
5065 - def expand(self,expression,field_type=None):
5066 if isinstance(expression,Field): 5067 if expression.type=='id': 5068 return "%s._id" % expression.tablename 5069 return BaseAdapter.expand(self,expression,field_type)
5070
5071 - def AND(self,first,second):
5072 return '(%s && %s)' % (self.expand(first),self.expand(second))
5073
5074 - def OR(self,first,second):
5075 return '(%s || %s)' % (self.expand(first),self.expand(second))
5076
5077 - def EQ(self,first,second):
5078 if second is None: 5079 return '(%s == null)' % self.expand(first) 5080 return '(%s == %s)' % (self.expand(first),self.expand(second,first.type))
5081
5082 - def NE(self,first,second):
5083 if second is None: 5084 return '(%s != null)' % self.expand(first) 5085 return '(%s != %s)' % (self.expand(first),self.expand(second,first.type))
5086
5087 - def COMMA(self,first,second):
5088 return '%s + %s' % (self.expand(first),self.expand(second))
5089
    def represent(self, obj, fieldtype):
        """Serialize *obj* for embedding in a CouchDB JS expression."""
        value = NoSQLAdapter.represent(self, obj, fieldtype)
        if fieldtype=='id':
            # ids are stored as quoted decimal strings
            return repr(str(long(value)))
        elif fieldtype in ('date','time','datetime','boolean'):
            return serializers.json(value)
        # Python 2: unicode values are utf8-encoded before repr()
        return repr(not isinstance(value,unicode) and value \
                        or value and value.encode('utf8'))
    def __init__(self,db,uri='couchdb://127.0.0.1:5984',
                 pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        """Connect to a CouchDB server at the host:port from *uri*."""
        self.db = db
        self.uri = uri
        if do_connect: self.find_driver(adapter_args)
        self.dbengine = 'couchdb'
        self.folder = folder
        db['_lastsql'] = ''
        self.db_codec = 'UTF-8'
        self._after_connection = after_connection
        self.pool_size = pool_size

        # strip the 'couchdb://' scheme prefix (10 characters)
        url='http://'+uri[10:]
        def connector(url=url,driver_args=driver_args):
            return self.driver.Server(url,**driver_args)
        self.reconnect(connector,cursor=False)
5118 - def create_table(self, table, migrate=True, fake_migrate=False, polymodel=None):
5119 if migrate: 5120 try: 5121 self.connection.create(table._tablename) 5122 except: 5123 pass
5124
5125 - def insert(self,table,fields):
5126 id = uuid2int(web2py_uuid()) 5127 ctable = self.connection[table._tablename] 5128 values = dict((k.name,self.represent(v,k.type)) for k,v in fields) 5129 values['_id'] = str(id) 5130 ctable.save(values) 5131 return id
5132
    def _select(self,query,fields,attributes):
        """Compile *query* into a CouchDB JavaScript map function.

        Returns (fn, colnames) where fn is the JS source of a temporary
        view emitting [field values] keyed by document _id.
        """
        if not isinstance(query,Query):
            raise SyntaxError("Not Supported")
        for key in set(attributes.keys())-SELECT_ARGS:
            raise SyntaxError('invalid select attribute: %s' % key)
        new_fields=[]
        for item in fields:
            if isinstance(item,SQLALL):
                new_fields += item._table
            else:
                new_fields.append(item)
        # map the DAL 'id' name onto CouchDB's '_id' attribute
        def uid(fd):
            return fd=='id' and '_id' or fd
        # NOTE(review): `get` appears unused in this method
        def get(row,fd):
            return fd=='id' and long(row['_id']) or row.get(fd,None)
        fields = new_fields
        tablename = self.get_table(query)
        fieldnames = [f.name for f in (fields or self.db[tablename])]
        colnames = ['%s.%s' % (tablename,k) for k in fieldnames]
        fields = ','.join(['%s.%s' % (tablename,uid(f)) for f in fieldnames])
        fn="(function(%(t)s){if(%(query)s)emit(%(order)s,[%(fields)s]);})" %\
            dict(t=tablename,
                 query=self.expand(query),
                 order='%s._id' % tablename,
                 fields=fields)
        return fn, colnames
5160 - def select(self,query,fields,attributes):
5161 if not isinstance(query,Query): 5162 raise SyntaxError("Not Supported") 5163 fn, colnames = self._select(query,fields,attributes) 5164 tablename = colnames[0].split('.')[0] 5165 ctable = self.connection[tablename] 5166 rows = [cols['value'] for cols in ctable.query(fn)] 5167 processor = attributes.get('processor',self.parse) 5168 return processor(rows,fields,colnames,False)
5169
    def delete(self,tablename,query):
        """Delete records matching *query*; returns the count deleted.

        A direct id==value query is handled with a single document
        delete; anything else selects the matching ids first.
        """
        if not isinstance(query,Query):
            raise SyntaxError("Not Supported")
        if query.first.type=='id' and query.op==self.EQ:
            id = query.second
            tablename = query.first.tablename
            # NOTE(review): this assert is tautological (compares the
            # value to itself) and so can never fire
            assert(tablename == query.first.tablename)
            ctable = self.connection[tablename]
            try:
                del ctable[str(id)]
                return 1
            except couchdb.http.ResourceNotFound:
                return 0
        else:
            tablename = self.get_table(query)
            rows = self.select(query,[self.db[tablename]._id],{})
            ctable = self.connection[tablename]
            for row in rows:
                del ctable[str(row.id)]
            return len(rows)
    def update(self,tablename,query,fields):
        """Update records matching *query*; returns the count updated.

        A direct id==value query updates one document in place; anything
        else selects the matching ids first and updates each document.
        """
        if not isinstance(query,Query):
            raise SyntaxError("Not Supported")
        if query.first.type=='id' and query.op==self.EQ:
            id = query.second
            tablename = query.first.tablename
            ctable = self.connection[tablename]
            try:
                doc = ctable[str(id)]
                for key,value in fields:
                    doc[key.name] = self.represent(value,self.db[tablename][key.name].type)
                ctable.save(doc)
                return 1
            except couchdb.http.ResourceNotFound:
                # document vanished: nothing updated
                return 0
        else:
            tablename = self.get_table(query)
            rows = self.select(query,[self.db[tablename]._id],{})
            ctable = self.connection[tablename]
            table = self.db[tablename]
            for row in rows:
                doc = ctable[str(row.id)]
                for key,value in fields:
                    doc[key.name] = self.represent(value,table[key.name].type)
                ctable.save(doc)
            return len(rows)
5218 - def count(self,query,distinct=None):
5219 if distinct: 5220 raise RuntimeError("COUNT DISTINCT not supported") 5221 if not isinstance(query,Query): 5222 raise SyntaxError("Not Supported") 5223 tablename = self.get_table(query) 5224 rows = self.select(query,[self.db[tablename]._id],{}) 5225 return len(rows)
5226
def cleanup(text):
    """Validate that *text* only contains [0-9a-zA-Z_] characters.

    Returns the text unchanged; raises SyntaxError otherwise.
    """
    if REGEX_ALPHANUMERIC.match(text):
        return text
    raise SyntaxError('invalid table or field name: %s' % text)
class MongoDBAdapter(NoSQLAdapter):
    """DAL adapter for MongoDB via pymongo (in progress)."""

    # MongoDB stores JSON natively; no string serialization is needed
    native_json = True
    drivers = ('pymongo',)

    # upload fields are stored inline in the document
    uploads_in_blob = True

    # DAL type -> Python type used when parsing values back from MongoDB
    types = {
        'boolean': bool,
        'string': str,
        'text': str,
        'json': str,
        'password': str,
        'blob': str,
        'upload': str,
        'integer': long,
        'bigint': long,
        'float': float,
        'double': float,
        'date': datetime.date,
        'time': datetime.time,
        'datetime': datetime.datetime,
        'id': long,
        'reference': long,
        'list:string': list,
        'list:integer': list,
        'list:reference': list,
        }

    error_messages = {"javascript_needed": "This must yet be replaced" +
                      " with javascript in order to work."}
    def __init__(self,db,uri='mongodb://127.0.0.1:5984/db',
                 pool_size=0, folder=None, db_codec ='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        """Connect to the MongoDB database named in *uri*.

        adapter_args may include 'minimumreplication' (write concern
        replica count, default 0) and 'safe' (synchronous writes,
        default True).
        """

        self.db = db
        self.uri = uri
        if do_connect: self.find_driver(adapter_args)
        import random
        from bson.objectid import ObjectId
        from bson.son import SON
        import pymongo.uri_parser

        m = pymongo.uri_parser.parse_uri(uri)

        # keep references to the bson/random helpers on the instance
        self.SON = SON
        self.ObjectId = ObjectId
        self.random = random

        self.dbengine = 'mongodb'
        self.folder = folder
        db['_lastsql'] = ''
        self.db_codec = 'UTF-8'
        self._after_connection = after_connection
        self.pool_size = pool_size
        # this is the minimum amount of replicates that it should wait
        # for on insert/update
        self.minimumreplication = adapter_args.get('minimumreplication',0)
        # by default all inserts and selects are performed asynchronously,
        # but now the default is synchronous, except when overruled by
        # either this default or a function parameter
        self.safe = adapter_args.get('safe',True)

        if isinstance(m,tuple):
            m = {"database" : m[1]}
        if m.get('database')==None:
            raise SyntaxError("Database is required!")

        def connector(uri=self.uri,m=m):
            # Connection() is deprecated in favour of MongoClient
            if hasattr(self.driver, "MongoClient"):
                Connection = self.driver.MongoClient
            else:
                Connection = self.driver.Connection
            return Connection(uri)[m.get('database')]

        self.reconnect(connector,cursor=False)
    def object_id(self, arg=None):
        """ Convert input to a valid Mongodb ObjectId instance

        Accepts an existing ObjectId, an integer, a decimal string, a
        hexadecimal string, or the literal "<random>" (which produces a
        non-unique random ObjectId).

        self.object_id("<random>") -> ObjectId (not unique) instance """
        if not arg:
            arg = 0
        if isinstance(arg, basestring):
            # we assume an integer as default input
            # 24 hex digits (ignoring 0x/L decorations) means a raw
            # ObjectId hex string, not a decimal integer
            rawhex = len(arg.replace("0x", "").replace("L", "")) == 24
            if arg.isdigit() and (not rawhex):
                arg = int(arg)
            elif arg == "<random>":
                # random 24-hex-digit value parsed as an integer
                arg = int("0x%sL" % \
                          "".join([self.random.choice("0123456789abcdef") \
                                       for x in range(24)]), 0)
            elif arg.isalnum():
                if not arg.startswith("0x"):
                    arg = "0x%s" % arg
                try:
                    arg = int(arg, 0)
                except ValueError, e:
                    raise ValueError(
                        "invalid objectid argument string: %s" % e)
            else:
                raise ValueError("Invalid objectid argument string. " +
                                 "Requires an integer or base 16 value")
        elif isinstance(arg, self.ObjectId):
            return arg

        if not isinstance(arg, (int, long)):
            raise TypeError("object_id argument must be of type " +
                            "ObjectId or an objectid representable integer")
        # render the integer as a 24-digit zero-padded hex string
        if arg == 0:
            hexvalue = "".zfill(24)
        else:
            hexvalue = hex(arg)[2:].replace("L", "")
        return self.ObjectId(hexvalue)
    def parse_reference(self, value, field_type):
        """Parse a reference value, translating ObjectIds to integers."""
        # here we have to check for ObjectID before base parse
        if isinstance(value, self.ObjectId):
            value = long(str(value), 16)
        return super(MongoDBAdapter,
                     self).parse_reference(value, field_type)

    def parse_id(self, value, field_type):
        """Parse an id value, translating ObjectIds to integers."""
        if isinstance(value, self.ObjectId):
            value = long(str(value), 16)
        return super(MongoDBAdapter,
                     self).parse_id(value, field_type)
    def represent(self, obj, fieldtype):
        """Serialize *obj* into a value MongoDB can store for *fieldtype*."""
        # the base adapter does not support MongoDB ObjectId
        if isinstance(obj, self.ObjectId):
            value = obj
        else:
            value = NoSQLAdapter.represent(self, obj, fieldtype)
        # reference types must be converted to ObjectId
        if fieldtype =='date':
            if value == None:
                return value
            # this piece of data can be stripped off based on the fieldtype
            t = datetime.time(0, 0, 0)
            # mongodb doesn't have a date object and so it must be a
            # datetime, string or integer
            return datetime.datetime.combine(value, t)
        elif fieldtype == 'time':
            if value == None:
                return value
            # this piece of data can be stripped off based on the fieldtype
            d = datetime.date(2000, 1, 1)
            # mongodb doesn't have a time object and so it must be a
            # datetime, string or integer
            return datetime.datetime.combine(d, value)
        elif fieldtype == "blob":
            from bson import Binary
            if not isinstance(value, Binary):
                return Binary(value)
            return value
        elif (isinstance(fieldtype, basestring) and
              fieldtype.startswith('list:')):
            if fieldtype.startswith('list:reference'):
                newval = []
                for v in value:
                    newval.append(self.object_id(v))
                return newval
            return value
        elif ((isinstance(fieldtype, basestring) and
               fieldtype.startswith("reference")) or
              (isinstance(fieldtype, Table)) or fieldtype=="id"):
            value = self.object_id(value)
        return value
5408 - def create_table(self, table, migrate=True, fake_migrate=False, 5409 polymodel=None, isCapped=False):
5410 if isCapped: 5411 raise RuntimeError("Not implemented")
5412
    def count(self, query, distinct=None, snapshot=True):
        """Count records matching *query* via a count-only select."""
        if distinct:
            raise RuntimeError("COUNT DISTINCT not supported")
        if not isinstance(query,Query):
            raise SyntaxError("Not Supported")
        tablename = self.get_table(query)
        return long(self.select(query,[self.db[tablename]._id], {},
                                count=True,snapshot=snapshot)['count'])
    # Maybe it would be faster if we just implemented the pymongo
    # .count() function which is probably quicker?
    # therefore call __select() connection[table].find(query).count()
    # Since this will probably reduce the return set?
    def expand(self, expression, field_type=None):
        """Expand a DAL expression into a pymongo query fragment."""
        if isinstance(expression, Query):
            # any query using 'id':=
            # set name as _id (as per pymongo/mongodb primary key)
            # convert second arg to an objectid field
            # (if its not already)
            # if second arg is 0 convert to objectid
            if isinstance(expression.first,Field) and \
                    ((expression.first.type == 'id') or \
                         ("reference" in expression.first.type)):
                if expression.first.type == 'id':
                    expression.first.name = '_id'
                # cast to Mongo ObjectId
                if isinstance(expression.second, (tuple, list, set)):
                    expression.second = [self.object_id(item) for
                                         item in expression.second]
                else:
                    expression.second = self.object_id(expression.second)
                # NOTE(review): this result is recomputed by the chain
                # below, so expression.op is evaluated twice for
                # id/reference queries — confirm this is intentional
                result = expression.op(expression.first, expression.second)

        if isinstance(expression, Field):
            if expression.type=='id':
                result = "_id"
            else:
                result = expression.name
        elif isinstance(expression, (Expression, Query)):
            if not expression.second is None:
                result = expression.op(expression.first, expression.second)
            elif not expression.first is None:
                result = expression.op(expression.first)
            elif not isinstance(expression.op, str):
                result = expression.op()
            else:
                result = expression.op
        elif field_type:
            result = self.represent(expression,field_type)
        elif isinstance(expression,(list,tuple)):
            result = ','.join(self.represent(item,field_type) for
                              item in expression)
        else:
            result = expression
        return result
5469 - def drop(self, table, mode=''):
5470 ctable = self.connection[table._tablename] 5471 ctable.drop()
5472
5473 - def truncate(self, table, mode, safe=None):
5474 if safe == None: 5475 safe=self.safe 5476 ctable = self.connection[table._tablename] 5477 ctable.remove(None, safe=True)
5478
    def _select(self, query, fields, attributes):
        """Compile query/fields/attributes into pymongo find() arguments.

        Returns (tablename, mongoqry_dict, mongofields_dict,
        mongosort_list, limitby_limit, limitby_skip).
        """
        if 'for_update' in attributes:
            logging.warn('mongodb does not support for_update')
        for key in set(attributes.keys())-set(('limitby',
                                               'orderby','for_update')):
            if attributes[key]!=None:
                logging.warn('select attribute not implemented: %s' % key)

        new_fields=[]
        mongosort_list = []

        # try an orderby attribute
        orderby = attributes.get('orderby', False)
        limitby = attributes.get('limitby', False)
        # distinct = attributes.get('distinct', False)
        if orderby:
            if isinstance(orderby, (list, tuple)):
                orderby = xorify(orderby)

            # !!!! need to add 'random'
            # a leading '-' means descending order
            for f in self.expand(orderby).split(','):
                if f.startswith('-'):
                    mongosort_list.append((f[1:], -1))
                else:
                    mongosort_list.append((f, 1))
        if limitby:
            limitby_skip, limitby_limit = limitby[0], int(limitby[1])
        else:
            limitby_skip = limitby_limit = 0

        mongofields_dict = self.SON()
        mongoqry_dict = {}
        for item in fields:
            if isinstance(item, SQLALL):
                new_fields += item._table
            else:
                new_fields.append(item)
        fields = new_fields
        if isinstance(query,Query):
            tablename = self.get_table(query)
        elif len(fields) != 0:
            tablename = fields[0].tablename
        else:
            raise SyntaxError("The table name could not be found in " +
                              "the query nor from the select statement.")
        mongoqry_dict = self.expand(query)
        fields = fields or self.db[tablename]
        # projection: include each requested field in the result set
        for field in fields:
            mongofields_dict[field.name] = 1

        return tablename, mongoqry_dict, mongofields_dict, mongosort_list, \
            limitby_limit, limitby_skip
def select(self, query, fields, attributes, count=False,
           snapshot=False):
    """Run a select against MongoDB and return processed rows.

    count:    when True, return {'count': n} instead of rows.
    snapshot: forwarded to pymongo's find().

    The Mongo-reserved '_id' column is renamed to 'id' in the result
    column names to follow the DAL convention. Joins are not supported.
    """
    # TODO: support joins
    tablename, mongoqry_dict, mongofields_dict, mongosort_list, \
    limitby_limit, limitby_skip = self._select(query, fields, attributes)
    ctable = self.connection[tablename]

    if count:
        return {'count' : ctable.find(
                mongoqry_dict, mongofields_dict,
                skip=limitby_skip, limit=limitby_limit,
                sort=mongosort_list, snapshot=snapshot).count()}
    else:
        # pymongo cursor object
        mongo_list_dicts = ctable.find(mongoqry_dict,
                            mongofields_dict, skip=limitby_skip,
                            limit=limitby_limit, sort=mongosort_list,
                            snapshot=snapshot)
        rows = []
        # populate row in proper order
        # Here we replace ._id with .id to follow the standard naming
        colnames = []
        newnames = []
        for field in fields:
            colname = str(field)
            colnames.append(colname)
            tablename, fieldname = colname.split(".")
            if fieldname == "_id":
                # Mongodb reserved uuid key
                # NOTE: this mutates the Field object's name in place
                field.name = "id"
            newnames.append(".".join((tablename, field.name)))

        for record in mongo_list_dicts:
            row=[]
            for colname in colnames:
                tablename, fieldname = colname.split(".")
                # switch back to the Mongo '_id' key when retrieving
                # record ids from the raw document
                if fieldname == "id": fieldname = "_id"
                if fieldname in record:
                    value = record[fieldname]
                else:
                    value = None
                row.append(value)
            rows.append(row)

        processor = attributes.get('processor', self.parse)
        result = processor(rows, fields, newnames, False)
        return result
5581
5582 - def _insert(self, table, fields):
5583 values = dict() 5584 for k, v in fields: 5585 if not k.name in ["id", "safe"]: 5586 fieldname = k.name 5587 fieldtype = table[k.name].type 5588 values[fieldname] = self.represent(v, fieldtype) 5589 return values
# "safe" determines whether an asynchronous request is made or a
# synchronous action is performed.
# For safety, synchronous requests are used by default.
def insert(self, table, fields, safe=None):
    """Insert one document; return its generated id as an integer.

    safe: write-concern flag; falls back to the adapter default
    (self.safe) when None.
    """
    if safe == None:
        safe = self.safe
    collection = self.connection[table._tablename]
    document = self._insert(table, fields)
    collection.insert(document, safe=safe)
    # the driver stores the generated ObjectId under '_id';
    # expose it as an integer parsed from its hex representation
    return long(str(document['_id']), 16)
# this function returns a dict with the where clause and the update fields
def _update(self, tablename, query, fields):
    """Build the (modify, filter) pair for a MongoDB update().

    Raises SyntaxError when *query* is not a DAL Query. Fields named
    "_id"/"id" are excluded from the $set document to avoid backend
    errors from rewriting the primary key.
    """
    if not isinstance(query, Query):
        raise SyntaxError("Not Supported")
    where = self.expand(query) if query else None
    changes = {}
    for field, value in fields:
        if field.name not in ("_id", "id"):
            changes[field.name] = self.represent(value, field.type)
    return {'$set': changes}, where
5613
def update(self, tablename, query, fields, safe=None):
    """Update matching documents; return the number of affected rows.

    Returns the driver-reported count when *safe* (write concern) is
    on and available, otherwise the pre-update match count. Driver
    failures are re-raised as RuntimeError.
    """
    if safe == None:
        safe = self.safe
    # return amount of adjusted rows or zero, but no exceptions
    # related to not finding the result
    if not isinstance(query, Query):
        raise RuntimeError("Not implemented")
    # pre-count matching rows; used as the fallback return value
    amount = self.count(query, False)
    modify, filter = self._update(tablename, query, fields)
    try:
        result = self.connection[tablename].update(
            filter, modify, multi=True, safe=safe)
        if safe:
            try:
                # if the driver reports an affected-row count, prefer it
                return result["n"]
            except (KeyError, AttributeError, TypeError):
                return amount
        else:
            return amount
    except Exception as e:
        # TODO reverse the update query to verify that it succeeded
        raise RuntimeError("uncaught exception when updating rows: %s" % e)
5637
def _delete(self, tablename, query):
    """Expand *query* to a MongoDB filter document.

    Only DAL Query instances are supported; anything else raises
    RuntimeError.
    """
    if isinstance(query, Query):
        return self.expand(query)
    raise RuntimeError("query type %s is not supported" % type(query))
5643
def delete(self, tablename, query, safe=None):
    """Delete matching documents; return the pre-deletion match count."""
    if safe is None:
        safe = self.safe
    # count first: remove() itself does not give a reliable row count here
    amount = self.count(query, False)
    where = self._delete(tablename, query)
    self.connection[tablename].remove(where, safe=safe)
    return amount
5652
def bulk_insert(self, table, items):
    """Insert each item in *items* one by one; return the new ids."""
    results = []
    for item in items:
        results.append(self.insert(table, item))
    return results
5655 5656 ## OPERATORS
def INVERT(self, first):
    """Render the unary-minus (descending orderby) marker for *first*."""
    expanded = self.expand(first)
    return '-%s' % expanded
5660 5661 # TODO This will probably not work:(
def NOT(self, first):
    """Wrap the expanded operand in a MongoDB $not clause.

    TODO (original author's note): this will probably not work.
    """
    return {'$not': self.expand(first)}
5666
def AND(self, first, second):
    """Merge both expanded operands into one implicit-AND document.

    pymongo treats co-resident keys as AND; keys present on both sides
    are overwritten by *second* (same as the original dict.update).
    """
    merged = self.expand(first)
    merged.update(self.expand(second))
    return merged
5672
def OR(self, first, second):
    """Build a MongoDB $or document from both expanded operands."""
    # pymongo expects: .find({'$or': [{'name':'1'}, {'name':'2'}]})
    return {'$or': [self.expand(first), self.expand(second)]}
5680
5681 - def BELONGS(self, first, second):
5682 if isinstance(second, str): 5683 return {self.expand(first) : {"$in" : [ second[:-1]]} } 5684 elif second==[] or second==() or second==set(): 5685 return {1:0} 5686 items = [self.expand(item, first.type) for item in second] 5687 return {self.expand(first) : {"$in" : items} }
5688
def EQ(self, first, second=None):
    """Equality test: {<field>: <value>} is Mongo's implicit equals."""
    return {self.expand(first): self.expand(second)}
5693
def NE(self, first, second=None):
    """Inequality test via the $ne operator."""
    return {self.expand(first): {'$ne': self.expand(second)}}
5698
def LT(self, first, second=None):
    """Less-than via $lt; None cannot be ordered and is rejected."""
    if second is None:
        raise RuntimeError("Cannot compare %s < None" % first)
    return {self.expand(first): {'$lt': self.expand(second)}}
5705
def LE(self, first, second=None):
    """Less-or-equal via $lte; None cannot be ordered and is rejected."""
    if second is None:
        raise RuntimeError("Cannot compare %s <= None" % first)
    return {self.expand(first): {'$lte': self.expand(second)}}
5712
def GT(self, first, second=None):
    """Greater-than via $gt.

    Consistency fix: LT, LE and GE all reject a None operand with a
    RuntimeError; GT previously forwarded None to expand(), producing
    an undefined query. It now rejects None the same way, and gains
    the matching ``second=None`` default (backward compatible).
    """
    if second is None:
        raise RuntimeError("Cannot compare %s > None" % first)
    return {self.expand(first): {'$gt': self.expand(second)}}
5717
def GE(self, first, second=None):
    """Greater-or-equal via $gte; None cannot be ordered and is rejected."""
    if second is None:
        raise RuntimeError("Cannot compare %s >= None" % first)
    return {self.expand(first): {'$gte': self.expand(second)}}
5724
def ADD(self, first, second):
    """Arithmetic '+' is not supported by this adapter.

    MongoDB cannot evaluate this server-side without JavaScript; the
    unreachable SQL-style rendering after the raise was removed.
    """
    raise NotImplementedError(self.error_messages["javascript_needed"])
5729
def SUB(self, first, second):
    """Arithmetic '-' is not supported by this adapter.

    MongoDB cannot evaluate this server-side without JavaScript; the
    unreachable SQL-style rendering after the raise was removed.
    """
    raise NotImplementedError(self.error_messages["javascript_needed"])
5734
def MUL(self, first, second):
    """Arithmetic '*' is not supported by this adapter.

    MongoDB cannot evaluate this server-side without JavaScript; the
    unreachable SQL-style rendering after the raise was removed.
    """
    raise NotImplementedError(self.error_messages["javascript_needed"])
5739
def DIV(self, first, second):
    """Arithmetic '/' is not supported by this adapter.

    MongoDB cannot evaluate this server-side without JavaScript; the
    unreachable SQL-style rendering after the raise was removed.
    """
    raise NotImplementedError(self.error_messages["javascript_needed"])
5744
def MOD(self, first, second):
    """Arithmetic '%' is not supported by this adapter.

    MongoDB cannot evaluate this server-side without JavaScript; the
    unreachable SQL-style rendering after the raise was removed.
    """
    raise NotImplementedError(self.error_messages["javascript_needed"])
5749
def AS(self, first, second):
    """Column aliasing (AS) is not supported by this adapter.

    The unreachable SQL-style rendering after the raise was removed.
    """
    raise NotImplementedError(self.error_messages["javascript_needed"])
5753 5754 # We could implement an option that simulates a full featured SQL 5755 # database. But I think the option should be set explicit or 5756 # implemented as another library.
def ON(self, first, second):
    """JOIN ... ON is not possible in a NoSQL backend.

    A full-featured SQL simulation would belong in a wrapper library,
    per the original author's note. The unreachable SQL-style
    rendering after the raise was removed.
    """
    raise NotImplementedError("This is not possible in NoSQL" +
                              " but can be simulated with a wrapper.")
# BELOW ARE TWO IMPLEMENTATIONS OF THE SAME FUNCTIONS
# WHICH ONE IS BEST?
def COMMA(self, first, second):
    """Render a comma-separated pair of expanded operands."""
    left = self.expand(first)
    right = self.expand(second)
    return '%s, %s' % (left, right)
5767
def LIKE(self, first, second):
    """First LIKE implementation: '%' wildcards become '/'.

    NOTE: a later definition of LIKE in this class shadows this one;
    kept verbatim for fidelity. Regex operators are not escaped
    (original open question).
    """
    pattern = self.expand(second, 'string').replace('%', '/')
    return {self.expand(first): '%s' % pattern}
5772
def STARTSWITH(self, first, second):
    """First STARTSWITH implementation (shadowed by a later one).

    Regex operators are not escaped (original open question).
    """
    pattern = '/^%s/' % self.expand(second, 'string')
    return {self.expand(first): pattern}
5777
def ENDSWITH(self, first, second):
    """First ENDSWITH implementation (shadowed by a later one).

    NOTE(review): '/%s^/' puts the '^' anchor at the end, which is not
    valid "ends with" regex anchoring — the later override uses the
    correct '$' form. Behavior preserved verbatim here.
    """
    pattern = '/%s^/' % self.expand(second, 'string')
    return {self.expand(first): pattern}
5782
def CONTAINS(self, first, second, case_sensitive=False):
    """Substring match via $regex; ObjectId operands compare directly.

    case_sensitive is accepted but silently ignored — the regex here
    is always case sensitive (original author's note: there is a
    technical difference but MongoDB does not support it).
    """
    if isinstance(second, self.ObjectId):
        val = second
    else:
        escaped = re.escape(self.expand(second, 'string'))
        val = {'$regex': ".*" + escaped + ".*"}
    return {self.expand(first): val}
5790
def LIKE(self, first, second):
    """SQL LIKE via $regex: escape the operand, then map '%' to '.*'."""
    import re
    pattern = re.escape(self.expand(second, 'string')).replace('%', '.*')
    return {self.expand(first): {'$regex': pattern}}
5796 5797 #TODO verify full compatibilty with official SQL Like operator
def STARTSWITH(self, first, second):
    """Prefix match: '^' anchor plus the escaped literal.

    TODO (original): verify full compatibility with the official SQL
    LIKE operator.
    """
    import re
    pattern = '^' + re.escape(self.expand(second, 'string'))
    return {self.expand(first): {'$regex': pattern}}
5804 5805 #TODO verify full compatibilty with official SQL Like operator
def ENDSWITH(self, first, second):
    """Suffix match: the escaped literal plus a '$' anchor.

    TODO (original): a search for endswith('a') may also match names
    like zsa_corbitt (it does end with a t) — behavior preserved.
    """
    import re
    pattern = re.escape(self.expand(second, 'string')) + '$'
    return {self.expand(first): {'$regex': pattern}}
5814 5815 #TODO verify full compatibilty with official oracle contains operator
def CONTAINS(self, first, second, case_sensitive=False):
    """Substring match via an unanchored $regex.

    case_sensitive is accepted but silently ignored (always case
    sensitive here). TODO (original): contains operators need to be
    transformed to a proper regex form.
    """
    escaped = re.escape(self.expand(second, 'string'))
    return {self.expand(first): {'$regex': ".*" + escaped + ".*"}}
5823
5824 5825 -class IMAPAdapter(NoSQLAdapter):
5826 drivers = ('imaplib',) 5827 5828 """ IMAP server adapter 5829 5830 This class is intended as an interface with 5831 email IMAP servers to perform simple queries in the 5832 web2py DAL query syntax, so email read, search and 5833 other related IMAP mail services (as those implemented 5834 by brands like Google(r), and Yahoo!(r) 5835 can be managed from web2py applications. 5836 5837 The code uses examples by Yuji Tomita on this post: 5838 http://yuji.wordpress.com/2011/06/22/python-imaplib-imap-example-with-gmail/#comment-1137 5839 and is based in docs for Python imaplib, python email 5840 and email IETF's (i.e. RFC2060 and RFC3501) 5841 5842 This adapter was tested with a small set of operations with Gmail(r). Other 5843 services requests could raise command syntax and response data issues. 5844 5845 It creates its table and field names "statically", 5846 meaning that the developer should leave the table and field 5847 definitions to the DAL instance by calling the adapter's 5848 .define_tables() method. The tables are defined with the 5849 IMAP server mailbox list information. 
5850 5851 .define_tables() returns a dictionary mapping dal tablenames 5852 to the server mailbox names with the following structure: 5853 5854 {<tablename>: str <server mailbox name>} 5855 5856 Here is a list of supported fields: 5857 5858 Field Type Description 5859 ################################################################ 5860 uid string 5861 answered boolean Flag 5862 created date 5863 content list:string A list of dict text or html parts 5864 to string 5865 cc string 5866 bcc string 5867 size integer the amount of octets of the message* 5868 deleted boolean Flag 5869 draft boolean Flag 5870 flagged boolean Flag 5871 sender string 5872 recent boolean Flag 5873 seen boolean Flag 5874 subject string 5875 mime string The mime header declaration 5876 email string The complete RFC822 message** 5877 attachments <type list> Each non text part as dict 5878 encoding string The main detected encoding 5879 5880 *At the application side it is measured as the length of the RFC822 5881 message string 5882 5883 WARNING: As row id's are mapped to email sequence numbers, 5884 make sure your imap client web2py app does not delete messages 5885 during select or update actions, to prevent 5886 updating or deleting different messages. 5887 Sequence numbers change whenever the mailbox is updated. 5888 To avoid this sequence numbers issues, it is recommended the use 5889 of uid fields in query references (although the update and delete 5890 in separate actions rule still applies). 
5891 5892 # This is the code recommended to start imap support 5893 # at the app's model: 5894 5895 imapdb = DAL("imap://user:password@server:port", pool_size=1) # port 993 for ssl 5896 imapdb.define_tables() 5897 5898 Here is an (incomplete) list of possible imap commands: 5899 5900 # Count today's unseen messages 5901 # smaller than 6000 octets from the 5902 # inbox mailbox 5903 5904 q = imapdb.INBOX.seen == False 5905 q &= imapdb.INBOX.created == datetime.date.today() 5906 q &= imapdb.INBOX.size < 6000 5907 unread = imapdb(q).count() 5908 5909 # Fetch last query messages 5910 rows = imapdb(q).select() 5911 5912 # it is also possible to filter query select results with limitby and 5913 # sequences of mailbox fields 5914 5915 set.select(<fields sequence>, limitby=(<int>, <int>)) 5916 5917 # Mark last query messages as seen 5918 messages = [row.uid for row in rows] 5919 seen = imapdb(imapdb.INBOX.uid.belongs(messages)).update(seen=True) 5920 5921 # Delete messages in the imap database that have mails from mr. 
Gumby 5922 5923 deleted = 0 5924 for mailbox in imapdb.tables 5925 deleted += imapdb(imapdb[mailbox].sender.contains("gumby")).delete() 5926 5927 # It is possible also to mark messages for deletion instead of ereasing them 5928 # directly with set.update(deleted=True) 5929 5930 5931 # This object give access 5932 # to the adapter auto mailbox 5933 # mapped names (which native 5934 # mailbox has what table name) 5935 5936 imapdb.mailboxes <dict> # tablename, server native name pairs 5937 5938 # To retrieve a table native mailbox name use: 5939 imapdb.<table>.mailbox 5940 5941 ### New features v2.4.1: 5942 5943 # Declare mailboxes statically with tablename, name pairs 5944 # This avoids the extra server names retrieval 5945 5946 imapdb.define_tables({"inbox": "INBOX"}) 5947 5948 # Selects without content/attachments/email columns will only 5949 # fetch header and flags 5950 5951 imapdb(q).select(imapdb.INBOX.sender, imapdb.INBOX.subject) 5952 """ 5953 5954 types = { 5955 'string': str, 5956 'text': str, 5957 'date': datetime.date, 5958 'datetime': datetime.datetime, 5959 'id': long, 5960 'boolean': bool, 5961 'integer': int, 5962 'bigint': long, 5963 'blob': str, 5964 'list:string': str, 5965 } 5966 5967 dbengine = 'imap' 5968 5969 REGEX_URI = re.compile('^(?P<user>[^:]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:@]+)(\:(?P<port>[0-9]+))?$') 5970
def __init__(self,
             db,
             uri,
             pool_size=0,
             folder=None,
             db_codec ='UTF-8',
             credential_decoder=IDENTITY,
             driver_args={},
             adapter_args={},
             do_connect=True,
             after_connection=None):
    """Set up the IMAP adapter and (optionally) connect.

    db uri format: imap://user@example.com:password@imap.server.com:123
    Port 993 switches the connector to IMAP4_SSL. The connection itself
    is established by reconnect() through the *connector* closure built
    here; when do_connect is False no network activity happens.
    """
    # TODO: max size adapter argument for preventing large mail transfers

    self.db = db
    self.uri = uri
    if do_connect: self.find_driver(adapter_args)
    self.pool_size=pool_size
    self.folder = folder
    self.db_codec = db_codec
    self._after_connection = after_connection
    self.credential_decoder = credential_decoder
    self.driver_args = driver_args
    self.adapter_args = adapter_args
    self.mailbox_size = None
    self.static_names = None
    self.charset = sys.getfilesystemencoding()
    # the imaplib class actually used (IMAP4 or IMAP4_SSL); set on connect
    self.imap4 = None
    # strip the "imap://" scheme prefix before parsing credentials
    uri = uri.split("://")[1]

    """ MESSAGE is an identifier for sequence number"""

    # row-field name -> IMAP system flag mapping
    self.flags = {'deleted': '\\Deleted', 'draft': '\\Draft',
                  'flagged': '\\Flagged', 'recent': '\\Recent',
                  'seen': '\\Seen', 'answered': '\\Answered'}
    # row-field name -> IMAP SEARCH keyword (None: not searchable)
    self.search_fields = {
        'id': 'MESSAGE', 'created': 'DATE',
        'uid': 'UID', 'sender': 'FROM',
        'to': 'TO', 'cc': 'CC',
        'bcc': 'BCC', 'content': 'TEXT',
        'size': 'SIZE', 'deleted': '\\Deleted',
        'draft': '\\Draft', 'flagged': '\\Flagged',
        'recent': '\\Recent', 'seen': '\\Seen',
        'subject': 'SUBJECT', 'answered': '\\Answered',
        'mime': None, 'email': None,
        'attachments': None
        }

    db['_lastsql'] = ''

    m = self.REGEX_URI.match(uri)
    user = m.group('user')
    password = m.group('password')
    host = m.group('host')
    port = int(m.group('port'))
    over_ssl = False
    if port==993:
        # standard IMAPS port implies an SSL connection
        over_ssl = True

    driver_args.update(host=host,port=port, password=password, user=user)
    def connector(driver_args=driver_args):
        # successful authentication is always assumed here
        # TODO: support direct connection and login tests
        if over_ssl:
            self.imap4 = self.driver.IMAP4_SSL
        else:
            self.imap4 = self.driver.IMAP4
        connection = self.imap4(driver_args["host"], driver_args["port"])
        data = connection.login(driver_args["user"], driver_args["password"])

        # static mailbox list (filled lazily by get_mailboxes)
        connection.mailbox_names = None

        # dummy cursor function: imaplib has no cursor concept, but the
        # pooling code expects one
        connection.cursor = lambda : True

        return connection

    self.db.define_tables = self.define_tables
    self.connector = connector
    if do_connect: self.reconnect()
6054
def reconnect(self, f=None, cursor=True):
    """
    IMAP4 Pool connection method

    imap connection lacks a real cursor command; a custom command is
    provided as a replacement so connection pooling can detect and
    recover from uncaught remote session closing.

    f:      connection factory; defaults to self.connector.
    cursor: when truthy, also (re)create the dummy cursor.
    """
    # already connected: nothing to do
    if getattr(self,'connection',None) != None:
        return
    if f is None:
        f = self.connector

    if not self.pool_size:
        # no pooling: connect directly
        self.connection = f()
        self.cursor = cursor and self.connection.cursor()
    else:
        POOLS = ConnectionPool.POOLS
        uri = self.uri
        while True:
            GLOBAL_LOCKER.acquire()
            if not uri in POOLS:
                POOLS[uri] = []
            if POOLS[uri]:
                # reuse a pooled connection, verifying it is still alive
                self.connection = POOLS[uri].pop()
                GLOBAL_LOCKER.release()
                self.cursor = cursor and self.connection.cursor()
                if self.cursor and self.check_active_connection:
                    try:
                        # check if connection is alive or close it
                        result, data = self.connection.list()
                    except:
                        # Possible connection reset error
                        # TODO: read exception class
                        self.connection = f()
                break
            else:
                # pool empty: create a fresh connection
                GLOBAL_LOCKER.release()
                self.connection = f()
                self.cursor = cursor and self.connection.cursor()
                break
    self.after_connection_hook()
6099
def get_last_message(self, tablename):
    """Return the highest message sequence number of the mailbox
    mapped to *tablename*, or None when it cannot be determined."""
    # lazily fetch the mailbox list from the server when needed
    if not isinstance(self.connection.mailbox_names, dict):
        self.get_mailboxes()
    try:
        selected = self.connection.select(
            self.connection.mailbox_names[tablename])
        return int(selected[1][0])
    except (IndexError, ValueError, TypeError, KeyError):
        e = sys.exc_info()[1]
        LOGGER.debug("Error retrieving the last mailbox sequence number. %s" % str(e))
        return None
6113
def get_uid_bounds(self, tablename):
    """Return (first_uid, last_uid) for the mailbox, or None if empty."""
    if not isinstance(self.connection.mailbox_names, dict):
        self.get_mailboxes()
    # selects the mailbox server-side as a side effect
    self.get_last_message(tablename)
    result, data = self.connection.uid("search", None, "(ALL)")
    uids = data[0].strip().split()
    if uids:
        return (uids[0], uids[-1])
    return None
6126
def convert_date(self, date, add=None, imf=False):
    """Convert between datetime objects and IMAP date strings.

    A str input (an RFC-style "DayName, D Mon Y H:M:S" header value)
    is parsed into a datetime plus *add*; a date/datetime input is
    formatted as "%d-%b-%Y" for IMAP search, or full internet message
    format when imf=True. Returns None for unparsable/unknown input.

    add: timedelta applied to the operand (defaults to zero).

    Fixes: the docstring used to sit after the first statements,
    making it a no-op string; map() is now materialized with list()
    so the hms indexing also works on Python 3; Py2-only
    "except E, e" syntax replaced with the portable "as" form. The
    datetime branch is checked first so it also works where
    basestring is unavailable; behavior is otherwise unchanged.
    """
    if add is None:
        add = datetime.timedelta()
    months = [None, "JAN","FEB","MAR","APR","MAY","JUN",
              "JUL","AUG","SEP","OCT","NOV","DEC"]
    if isinstance(date, (datetime.date, datetime.datetime)):
        if imf: date_format = "%a, %d %b %Y %H:%M:%S %z"
        else: date_format = "%d-%b-%Y"
        return (date + add).strftime(date_format)
    elif isinstance(date, basestring):
        # Prevent unexpected date response format
        try:
            if "," in date:
                dayname, datestring = date.split(",")
            else:
                dayname, datestring = None, date
            date_list = datestring.strip().split()
            year = int(date_list[2])
            month = months.index(date_list[1].upper())
            day = int(date_list[0])
            hms = list(map(int, date_list[3].split(":")))
            return datetime.datetime(year, month, day,
                                     hms[0], hms[1], hms[2]) + add
        except (ValueError, AttributeError, IndexError) as e:
            LOGGER.error("Could not parse date text: %s. %s" %
                         (date, e))
            return None
    else:
        return None
6162 6163 @staticmethod
6164 - def header_represent(f, r):
6165 from email.header import decode_header 6166 text, encoding = decode_header(f)[0] 6167 if encoding: 6168 text = text.decode(encoding).encode('utf-8') 6169 return text
6170
def encode_text(self, text, charset, errors="replace"):
    """Convert mail text to a UTF-8 encoded string.

    None becomes the empty string; str input is decoded from *charset*
    (falling back to utf-8) via unicode() — note this decode path is
    Python 2 specific. Any other type raises Exception.
    """
    if text is None:
        return "".encode("utf-8")
    if not isinstance(text, str):
        raise Exception("Unsupported mail text type %s" % type(text))
    source_charset = charset if charset is not None else "utf-8"
    return unicode(text, source_charset, errors).encode("utf-8")
6184
def get_charset(self, message):
    """Return the content charset declared by *message*, or None."""
    return message.get_content_charset()
6188
def get_mailboxes(self):
    """Fetch mailbox names from the server (or the static mapping).

    Fills self.connection.mailbox_names with a {sanitized: native}
    mapping and returns the sanitized names.
    """
    if self.static_names:
        # statically defined mailbox names: no server round trip
        self.connection.mailbox_names = self.static_names
        return self.static_names.keys()

    listing = self.connection.list()
    self.connection.mailbox_names = dict()
    mailboxes = list()
    for raw in listing[1]:
        raw = raw.strip()
        if "NOSELECT" in raw.upper():
            continue
        pieces = [piece for piece in raw.split("\"")
                  if len(piece.strip()) > 0]
        mailbox = pieces[-1]
        # sanitize for use as a table name: slashes/spaces -> '_',
        # drop other punctuation, and strip leading digits/underscores
        name = re.sub(r'^[_0-9]*', '',
                      re.sub(r'[^_\w]', '', re.sub(r'[/ ]', '_', mailbox)))
        mailboxes.append(name)
        self.connection.mailbox_names[name] = mailbox

    return mailboxes
6216
def get_query_mailbox(self, query):
    """Walk the query's .first chain until a Field is found; return
    that field's tablename (the mailbox table) or None."""
    node = query
    while hasattr(node, "first"):
        node = node.first
        if isinstance(node, Field):
            return node.tablename
        if not isinstance(node, Query):
            return None
        # nested Query: keep descending
    return None
6233
def is_flag(self, flag):
    """True when *flag* maps to an IMAP system flag (\\Seen etc.)."""
    return self.search_fields.get(flag, None) in self.flags.values()
6239
def define_tables(self, mailbox_names=None):
    """
    Auto create common IMAP fields.

    This function creates field definitions "statically", meaning that
    custom fields as in other adapters are not supported and
    definitions are handled on a service/mode basis (local syntax for
    Gmail(r), Ymail(r)).

    mailbox_names: optional {tablename: native mailbox name} dict;
    when given, no mailbox list is fetched from the server.

    Returns a dictionary with tablename, server native mailbox name
    pairs.
    """
    if mailbox_names:
        # optional statically declared mailboxes
        self.static_names = mailbox_names
    else:
        self.static_names = None
    if not isinstance(self.connection.mailbox_names, dict):
        self.get_mailboxes()

    names = self.connection.mailbox_names.keys()

    for name in names:
        self.db.define_table("%s" % name,
            Field("uid", "string", writable=False),
            Field("answered", "boolean"),
            Field("created", "datetime", writable=False),
            Field("content", list, writable=False),
            Field("to", "string", writable=False),
            Field("cc", "string", writable=False),
            Field("bcc", "string", writable=False),
            Field("size", "integer", writable=False),
            Field("deleted", "boolean"),
            Field("draft", "boolean"),
            Field("flagged", "boolean"),
            Field("sender", "string", writable=False),
            Field("recent", "boolean", writable=False),
            Field("seen", "boolean"),
            Field("subject", "string", writable=False),
            Field("mime", "string", writable=False),
            Field("email", "string", writable=False, readable=False),
            Field("attachments", list, writable=False, readable=False),
            Field("encoding", writable=False)
            )

        # Set a special .mailbox attribute for storing
        # native mailbox names
        self.db[name].mailbox = \
            self.connection.mailbox_names[name]

        # decode quoted printable in address/subject headers
        self.db[name].to.represent = self.db[name].cc.represent = \
            self.db[name].bcc.represent = self.db[name].sender.represent = \
            self.db[name].subject.represent = self.header_represent

    # Set the db instance mailbox collections
    self.db.mailboxes = self.connection.mailbox_names
    return self.db.mailboxes
6298
def create_table(self, *args, **kwargs):
    """No-op: IMAP tables are declared by define_tables(), but the
    DAL requires this method to exist."""
    return None
6303
def _select(self, query, fields, attributes):
    """Return the textual IMAP search clause for *query*, with common
    filters applied when enabled."""
    if use_common_filters(query):
        mailboxes = [self.get_query_mailbox(query),]
        query = self.common_filter(query, mailboxes)
    return str(query)
6308
def select(self, query, fields, attributes):
    """Search and fetch records from the IMAP server and return
    web2py rows.

    Runs a UID SEARCH for *query*, then fetches each matching message
    (header-only when no content/size/attachments/email field is
    requested), maps the results onto the statically defined mailbox
    table columns, and hands the array to the row processor.
    """
    # move this statement elsewhere (upper-level)
    if use_common_filters(query):
        query = self.common_filter(query, [self.get_query_mailbox(query),])

    import email
    # get records from imap server with search + fetch
    # convert results to a dictionary
    tablename = None
    fetch_results = list()

    if isinstance(query, Query):
        tablename = self.get_table(query)
        mailbox = self.connection.mailbox_names.get(tablename, None)
        if mailbox is None:
            raise ValueError("Mailbox name not found: %s" % mailbox)
        else:
            # select with readonly
            result, selected = self.connection.select(mailbox, True)
            if result != "OK":
                raise Exception("IMAP error: %s" % selected)
            self.mailbox_size = int(selected[0])
            search_query = "(%s)" % str(query).strip()
            search_result = self.connection.uid("search", None, search_query)
            # Normal IMAP response OK is assumed (change this)
            if search_result[0] == "OK":
                # For "light" remote server responses just get the first
                # ten records (change for non-experimental implementation)
                # However, light responses are not guaranteed with this
                # approach, just fewer messages.
                limitby = attributes.get('limitby', None)
                messages_set = search_result[1][0].split()
                # descending order
                messages_set.reverse()
                if limitby is not None:
                    # TODO: orderby, asc/desc, limitby from complete message set
                    messages_set = messages_set[int(limitby[0]):int(limitby[1])]

                # keep the requests small for header/flags-only selects
                if any([(field.name in ["content", "size",
                                        "attachments", "email"]) for
                       field in fields]):
                    imap_fields = "(RFC822 FLAGS)"
                else:
                    imap_fields = "(RFC822.HEADER FLAGS)"

                if len(messages_set) > 0:
                    # create fetch results object list
                    # fetch each remote message and store it in memory
                    # (change to multi-fetch command syntax for faster
                    # transactions)
                    for uid in messages_set:
                        # fetch the RFC822 message body
                        typ, data = self.connection.uid("fetch", uid, imap_fields)
                        if typ == "OK":
                            fr = {"message": int(data[0][0].split()[0]),
                                  "uid": long(uid),
                                  "email": email.message_from_string(data[0][1]),
                                  "raw_message": data[0][1]}
                            fr["multipart"] = fr["email"].is_multipart()
                            # fetch flags for the message
                            fr["flags"] = self.driver.ParseFlags(data[1])
                            fetch_results.append(fr)
                        else:
                            # error retrieving the message body
                            raise Exception("IMAP error retrieving the body: %s" % data)
            else:
                raise Exception("IMAP search error: %s" % search_result[1])
    elif isinstance(query, (Expression, basestring)):
        raise NotImplementedError()
    else:
        raise TypeError("Unexpected query type")

    imapqry_dict = {}
    imapfields_dict = {}

    # SELECT * (or no explicit fields) means all searchable columns
    if len(fields) == 1 and isinstance(fields[0], SQLALL):
        allfields = True
    elif len(fields) == 0:
        allfields = True
    else:
        allfields = False
    if allfields:
        colnames = ["%s.%s" % (tablename, field) for field in self.search_fields.keys()]
    else:
        colnames = ["%s.%s" % (tablename, field.name) for field in fields]

    for k in colnames:
        imapfields_dict[k] = k

    imapqry_list = list()
    imapqry_array = list()
    for fr in fetch_results:
        attachments = []
        content = []
        size = 0
        n = int(fr["message"])
        item_dict = dict()
        message = fr["email"]
        uid = fr["uid"]
        charset = self.get_charset(message)
        flags = fr["flags"]
        raw_message = fr["raw_message"]
        # Return messages data mapping static fields
        # and fetched results. Mapping should be made
        # outside the select function (with auxiliary
        # instance methods)

        # pending: search flags states through the email message
        # instances for correct output

        # preserve subject encoding (ASCII/quoted printable)

        if "%s.id" % tablename in colnames:
            item_dict["%s.id" % tablename] = n
        if "%s.created" % tablename in colnames:
            item_dict["%s.created" % tablename] = self.convert_date(message["Date"])
        if "%s.uid" % tablename in colnames:
            item_dict["%s.uid" % tablename] = uid
        if "%s.sender" % tablename in colnames:
            # If there is no encoding found in the message header
            # force utf-8 replacing characters (change this to
            # module's defaults). Applies to .sender, .to, .cc and .bcc fields
            item_dict["%s.sender" % tablename] = message["From"]
        if "%s.to" % tablename in colnames:
            item_dict["%s.to" % tablename] = message["To"]
        if "%s.cc" % tablename in colnames:
            if "Cc" in message.keys():
                item_dict["%s.cc" % tablename] = message["Cc"]
            else:
                item_dict["%s.cc" % tablename] = ""
        if "%s.bcc" % tablename in colnames:
            if "Bcc" in message.keys():
                item_dict["%s.bcc" % tablename] = message["Bcc"]
            else:
                item_dict["%s.bcc" % tablename] = ""
        if "%s.deleted" % tablename in colnames:
            item_dict["%s.deleted" % tablename] = "\\Deleted" in flags
        if "%s.draft" % tablename in colnames:
            item_dict["%s.draft" % tablename] = "\\Draft" in flags
        if "%s.flagged" % tablename in colnames:
            item_dict["%s.flagged" % tablename] = "\\Flagged" in flags
        if "%s.recent" % tablename in colnames:
            item_dict["%s.recent" % tablename] = "\\Recent" in flags
        if "%s.seen" % tablename in colnames:
            item_dict["%s.seen" % tablename] = "\\Seen" in flags
        if "%s.subject" % tablename in colnames:
            item_dict["%s.subject" % tablename] = message["Subject"]
        if "%s.answered" % tablename in colnames:
            item_dict["%s.answered" % tablename] = "\\Answered" in flags
        if "%s.mime" % tablename in colnames:
            item_dict["%s.mime" % tablename] = message.get_content_type()
        if "%s.encoding" % tablename in colnames:
            item_dict["%s.encoding" % tablename] = charset

        # Here goes the whole RFC822 body as an email instance
        # for controller side custom processing
        # The message is stored as a raw string
        # >> email.message_from_string(raw string)
        # returns a Message object for enhanced object processing
        if "%s.email" % tablename in colnames:
            # WARNING: no encoding performed (raw message)
            item_dict["%s.email" % tablename] = raw_message

        # Size measure as suggested in a Velocity Reviews post
        # by Tim Williams: "how to get size of email attachment"
        # Note: len() and server RFC822.SIZE reports don't match
        # To retrieve the server size for representation would add a new
        # fetch transaction to the process
        for part in message.walk():
            maintype = part.get_content_maintype()
            if ("%s.attachments" % tablename in colnames) or \
                    ("%s.content" % tablename in colnames):
                payload = part.get_payload(decode=True)
                if payload:
                    filename = part.get_filename()
                    values = {"mime": part.get_content_type()}
                    # non-text parts (or named parts) become attachments;
                    # text parts become content entries
                    if ((filename or not "text" in maintype) and
                            ("%s.attachments" % tablename in colnames)):
                        values.update({"payload": payload,
                                       "filename": filename,
                                       "encoding": part.get_content_charset(),
                                       "disposition": part["Content-Disposition"]})
                        attachments.append(values)
                    elif (("text" in maintype) and
                          ("%s.content" % tablename in colnames)):
                        values.update({"text": self.encode_text(payload,
                                           self.get_charset(part))})
                        content.append(values)

            if "%s.size" % tablename in colnames:
                if part is not None:
                    size += len(str(part))
        item_dict["%s.content" % tablename] = content
        item_dict["%s.attachments" % tablename] = attachments
        item_dict["%s.size" % tablename] = size
        imapqry_list.append(item_dict)

    # extra object mapping for the sake of rows object
    # creation (sends an array of lists)
    for item_dict in imapqry_list:
        imapqry_array_item = list()
        for fieldname in colnames:
            imapqry_array_item.append(item_dict[fieldname])
        imapqry_array.append(imapqry_array_item)

    # parse result and return a rows object
    colnames = colnames
    processor = attributes.get('processor',self.parse)
    return processor(imapqry_array, fields, colnames)
6521
    def _insert(self, table, fields):
        """Build the argument tuple for an IMAP APPEND from a DAL insert.

        Returns (mailbox, flags, struct_time, message) ready to be passed
        to imaplib's append().  ``fields`` is a sequence of (Field, value)
        pairs as supplied by the DAL insert machinery.

        Raises NotImplementedError when no field values are supplied
        (IMAP cannot store an empty message).
        """
        def add_payload(message, obj):
            # Attach one text part or one attachment described by dict obj.
            payload = Message()
            payload.set_charset(obj.get("encoding", "utf-8"))
            mime = obj.get("mime", None)
            if mime:
                payload.set_type(mime)
            if "text" in obj:
                payload.set_payload(obj["text"])
            elif "payload" in obj:
                payload.set_payload(obj["payload"])
            if "filename" in obj and obj["filename"]:
                payload.add_header("Content-Disposition",
                                   "attachment", filename=obj["filename"])
            message.attach(payload)

        mailbox = table.mailbox
        d = dict(((k.name, v) for k, v in fields))
        date_time = d.get("created", datetime.datetime.now())
        struct_time = date_time.timetuple()
        if len(d) > 0:
            message = d.get("email", None)
            attachments = d.get("attachments", [])
            content = d.get("content", [])
            # IMAP system flags derived from the boolean flag fields.
            # NOTE(review): \Recent is included here although servers
            # treat it as read-only — confirm intended.
            flags = " ".join(["\\%s" % flag.capitalize() for flag in
                              ("answered", "deleted", "draft", "flagged",
                               "recent", "seen") if d.get(flag, False)])
            if not message:
                # No pre-built "email" value supplied: compose a message
                # from the individual field values.
                # This import also provides Message to add_payload()
                # above, via closure over this function's scope.
                from email.message import Message
                mime = d.get("mime", None)
                charset = d.get("encoding", None)
                message = Message()
                message["from"] = d.get("sender", "")
                message["subject"] = d.get("subject", "")
                message["date"] = self.convert_date(date_time, imf=True)

                if mime:
                    message.set_type(mime)
                if charset:
                    message.set_charset(charset)
                for item in ("to", "cc", "bcc"):
                    value = d.get(item, "")
                    if isinstance(value, basestring):
                        message[item] = value
                    else:
                        # sequences of addresses are joined with ';'
                        message[item] = ";".join([i for i in
                                                  value])
                # single-part messages take their body directly;
                # multipart ones get content/attachment sub-parts
                if (not message.is_multipart() and
                   (not message.get_content_type().startswith(
                        "multipart"))):
                    if isinstance(content, basestring):
                        message.set_payload(content)
                    elif len(content) > 0:
                        message.set_payload(content[0]["text"])
                else:
                    [add_payload(message, c) for c in content]
                    [add_payload(message, a) for a in attachments]
                message = message.as_string()
            return (mailbox, flags, struct_time, message)
        else:
            raise NotImplementedError("IMAP empty insert is not implemented")
6584 - def insert(self, table, fields):
6585 values = self._insert(table, fields) 6586 result, data = self.connection.append(*values) 6587 if result == "OK": 6588 uid = int(re.findall("\d+", str(data))[-1]) 6589 return self.db(table.uid==uid).select(table.id).first().id 6590 else: 6591 raise Exception("IMAP message append failed: %s" % data)
6592
    def _update(self, tablename, query, fields, commit=False):
        """Build the list of IMAP STORE commands implementing an update.

        Only flag fields can be updated; each matching message gets a
        (+FLAGS ...) and/or (-FLAGS ...) command.  Returns a list of
        (message_number, op, flags_string) tuples.  ``commit`` is
        accepted for API compatibility but unused here.
        """
        # TODO: the adapter should implement an .expand method
        commands = list()
        if use_common_filters(query):
            query = self.common_filter(query, [tablename,])
        mark = []
        unmark = []
        if query:
            # split the requested flag changes into set/clear lists;
            # \Recent is read-only on the server and is skipped
            for item in fields:
                field = item[0]
                name = field.name
                value = item[1]
                if self.is_flag(name):
                    flag = self.search_fields[name]
                    if (value is not None) and (flag != "\\Recent"):
                        if value:
                            mark.append(flag)
                        else:
                            unmark.append(flag)
            # search the mailbox for the message sequence numbers
            # matched by the (already expanded) query string
            result, data = self.connection.select(
                self.connection.mailbox_names[tablename])
            string_query = "(%s)" % query
            result, data = self.connection.search(None, string_query)
            store_list = [item.strip() for item in data[0].split()
                          if item.strip().isdigit()]
            # build commands for marked flags
            for number in store_list:
                result = None
                if len(mark) > 0:
                    commands.append((number, "+FLAGS", "(%s)" % " ".join(mark)))
                if len(unmark) > 0:
                    commands.append((number, "-FLAGS", "(%s)" % " ".join(unmark)))
        return commands
6626
6627 - def update(self, tablename, query, fields):
6628 rowcount = 0 6629 commands = self._update(tablename, query, fields) 6630 for command in commands: 6631 result, data = self.connection.store(*command) 6632 if result == "OK": 6633 rowcount += 1 6634 else: 6635 raise Exception("IMAP storing error: %s" % data) 6636 return rowcount
6637
    def _count(self, query, distinct=None):
        """Not supported by the IMAP adapter; use count() instead."""
        raise NotImplementedError()
6640
6641 - def count(self,query,distinct=None):
6642 counter = 0 6643 tablename = self.get_query_mailbox(query) 6644 if query and tablename is not None: 6645 if use_common_filters(query): 6646 query = self.common_filter(query, [tablename,]) 6647 result, data = self.connection.select(self.connection.mailbox_names[tablename]) 6648 string_query = "(%s)" % query 6649 result, data = self.connection.search(None, string_query) 6650 store_list = [item.strip() for item in data[0].split() if item.strip().isdigit()] 6651 counter = len(store_list) 6652 return counter
6653
6654 - def delete(self, tablename, query):
6655 counter = 0 6656 if query: 6657 if use_common_filters(query): 6658 query = self.common_filter(query, [tablename,]) 6659 result, data = self.connection.select(self.connection.mailbox_names[tablename]) 6660 string_query = "(%s)" % query 6661 result, data = self.connection.search(None, string_query) 6662 store_list = [item.strip() for item in data[0].split() if item.strip().isdigit()] 6663 for number in store_list: 6664 result, data = self.connection.store(number, "+FLAGS", "(\\Deleted)") 6665 if result == "OK": 6666 counter += 1 6667 else: 6668 raise Exception("IMAP store error: %s" % data) 6669 if counter > 0: 6670 result, data = self.connection.expunge() 6671 return counter
6672
6673 - def BELONGS(self, first, second):
6674 result = None 6675 name = self.search_fields[first.name] 6676 if name == "MESSAGE": 6677 values = [str(val) for val in second if str(val).isdigit()] 6678 result = "%s" % ",".join(values).strip() 6679 6680 elif name == "UID": 6681 values = [str(val) for val in second if str(val).isdigit()] 6682 result = "UID %s" % ",".join(values).strip() 6683 6684 else: 6685 raise Exception("Operation not supported") 6686 # result = "(%s %s)" % (self.expand(first), self.expand(second)) 6687 return result
6688
6689 - def CONTAINS(self, first, second, case_sensitive=False):
6690 # silently ignore, only case sensitive 6691 result = None 6692 name = self.search_fields[first.name] 6693 6694 if name in ("FROM", "TO", "SUBJECT", "TEXT"): 6695 result = "%s \"%s\"" % (name, self.expand(second)) 6696 else: 6697 if first.name in ("cc", "bcc"): 6698 result = "%s \"%s\"" % (first.name.upper(), self.expand(second)) 6699 elif first.name == "mime": 6700 result = "HEADER Content-Type \"%s\"" % self.expand(second) 6701 else: 6702 raise Exception("Operation not supported") 6703 return result
6704
6705 - def GT(self, first, second):
6706 result = None 6707 name = self.search_fields[first.name] 6708 if name == "MESSAGE": 6709 last_message = self.get_last_message(first.tablename) 6710 result = "%d:%d" % (int(self.expand(second)) + 1, last_message) 6711 elif name == "UID": 6712 # GT and LT may not return 6713 # expected sets depending on 6714 # the uid format implemented 6715 try: 6716 pedestal, threshold = self.get_uid_bounds(first.tablename) 6717 except TypeError: 6718 e = sys.exc_info()[1] 6719 LOGGER.debug("Error requesting uid bounds: %s", str(e)) 6720 return "" 6721 try: 6722 lower_limit = int(self.expand(second)) + 1 6723 except (ValueError, TypeError): 6724 e = sys.exc_info()[1] 6725 raise Exception("Operation not supported (non integer UID)") 6726 result = "UID %s:%s" % (lower_limit, threshold) 6727 elif name == "DATE": 6728 result = "SINCE %s" % self.convert_date(second, add=datetime.timedelta(1)) 6729 elif name == "SIZE": 6730 result = "LARGER %s" % self.expand(second) 6731 else: 6732 raise Exception("Operation not supported") 6733 return result
6734
6735 - def GE(self, first, second):
6736 result = None 6737 name = self.search_fields[first.name] 6738 if name == "MESSAGE": 6739 last_message = self.get_last_message(first.tablename) 6740 result = "%s:%s" % (self.expand(second), last_message) 6741 elif name == "UID": 6742 # GT and LT may not return 6743 # expected sets depending on 6744 # the uid format implemented 6745 try: 6746 pedestal, threshold = self.get_uid_bounds(first.tablename) 6747 except TypeError: 6748 e = sys.exc_info()[1] 6749 LOGGER.debug("Error requesting uid bounds: %s", str(e)) 6750 return "" 6751 lower_limit = self.expand(second) 6752 result = "UID %s:%s" % (lower_limit, threshold) 6753 elif name == "DATE": 6754 result = "SINCE %s" % self.convert_date(second) 6755 else: 6756 raise Exception("Operation not supported") 6757 return result
6758
6759 - def LT(self, first, second):
6760 result = None 6761 name = self.search_fields[first.name] 6762 if name == "MESSAGE": 6763 result = "%s:%s" % (1, int(self.expand(second)) - 1) 6764 elif name == "UID": 6765 try: 6766 pedestal, threshold = self.get_uid_bounds(first.tablename) 6767 except TypeError: 6768 e = sys.exc_info()[1] 6769 LOGGER.debug("Error requesting uid bounds: %s", str(e)) 6770 return "" 6771 try: 6772 upper_limit = int(self.expand(second)) - 1 6773 except (ValueError, TypeError): 6774 e = sys.exc_info()[1] 6775 raise Exception("Operation not supported (non integer UID)") 6776 result = "UID %s:%s" % (pedestal, upper_limit) 6777 elif name == "DATE": 6778 result = "BEFORE %s" % self.convert_date(second) 6779 elif name == "SIZE": 6780 result = "SMALLER %s" % self.expand(second) 6781 else: 6782 raise Exception("Operation not supported") 6783 return result
6784
6785 - def LE(self, first, second):
6786 result = None 6787 name = self.search_fields[first.name] 6788 if name == "MESSAGE": 6789 result = "%s:%s" % (1, self.expand(second)) 6790 elif name == "UID": 6791 try: 6792 pedestal, threshold = self.get_uid_bounds(first.tablename) 6793 except TypeError: 6794 e = sys.exc_info()[1] 6795 LOGGER.debug("Error requesting uid bounds: %s", str(e)) 6796 return "" 6797 upper_limit = int(self.expand(second)) 6798 result = "UID %s:%s" % (pedestal, upper_limit) 6799 elif name == "DATE": 6800 result = "BEFORE %s" % self.convert_date(second, add=datetime.timedelta(1)) 6801 else: 6802 raise Exception("Operation not supported") 6803 return result
6804
6805 - def NE(self, first, second=None):
6806 if (second is None) and isinstance(first, Field): 6807 # All records special table query 6808 if first.type == "id": 6809 return self.GE(first, 1) 6810 result = self.NOT(self.EQ(first, second)) 6811 result = result.replace("NOT NOT", "").strip() 6812 return result
6813
6814 - def EQ(self,first,second):
6815 name = self.search_fields[first.name] 6816 result = None 6817 if name is not None: 6818 if name == "MESSAGE": 6819 # query by message sequence number 6820 result = "%s" % self.expand(second) 6821 elif name == "UID": 6822 result = "UID %s" % self.expand(second) 6823 elif name == "DATE": 6824 result = "ON %s" % self.convert_date(second) 6825 6826 elif name in self.flags.values(): 6827 if second: 6828 result = "%s" % (name.upper()[1:]) 6829 else: 6830 result = "NOT %s" % (name.upper()[1:]) 6831 else: 6832 raise Exception("Operation not supported") 6833 else: 6834 raise Exception("Operation not supported") 6835 return result
6836
6837 - def AND(self, first, second):
6838 result = "%s %s" % (self.expand(first), self.expand(second)) 6839 return result
6840
6841 - def OR(self, first, second):
6842 result = "OR %s %s" % (self.expand(first), self.expand(second)) 6843 return "%s" % result.replace("OR OR", "OR")
6844
6845 - def NOT(self, first):
6846 result = "NOT %s" % self.expand(first) 6847 return result
########################################################################
# end of adapters
########################################################################

# Maps each supported connection-uri scheme (the part before '://')
# to the adapter class that implements it; DAL() uses this table to
# pick the adapter for a given uri.
ADAPTERS = {
    'sqlite': SQLiteAdapter,
    'spatialite': SpatiaLiteAdapter,
    'sqlite:memory': SQLiteAdapter,
    'spatialite:memory': SpatiaLiteAdapter,
    'mysql': MySQLAdapter,
    'postgres': PostgreSQLAdapter,
    'postgres:psycopg2': PostgreSQLAdapter,
    'postgres:pg8000': PostgreSQLAdapter,
    'postgres2:psycopg2': NewPostgreSQLAdapter,
    'postgres2:pg8000': NewPostgreSQLAdapter,
    'oracle': OracleAdapter,
    'mssql': MSSQLAdapter,
    'mssql2': MSSQL2Adapter,
    'mssql3': MSSQL3Adapter,
    'vertica': VerticaAdapter,
    'sybase': SybaseAdapter,
    'db2': DB2Adapter,
    'teradata': TeradataAdapter,
    'informix': InformixAdapter,
    'informix-se': InformixSEAdapter,
    'firebird': FireBirdAdapter,
    'firebird_embedded': FireBirdAdapter,
    'ingres': IngresAdapter,
    'ingresu': IngresUnicodeAdapter,
    'sapdb': SAPDBAdapter,
    'cubrid': CubridAdapter,
    'jdbc:sqlite': JDBCSQLiteAdapter,
    'jdbc:sqlite:memory': JDBCSQLiteAdapter,
    'jdbc:postgres': JDBCPostgreSQLAdapter,
    'gae': GoogleDatastoreAdapter, # discouraged, for backward compatibility
    'google:datastore': GoogleDatastoreAdapter,
    'google:sql': GoogleSQLAdapter,
    'couchdb': CouchDBAdapter,
    'mongodb': MongoDBAdapter,
    'imap': IMAPAdapter
}
def sqlhtml_validators(field):
    """
    Field type validation, using web2py's validators mechanism.

    makes sure the content of a field is in line with the declared
    fieldtype
    """
    db = field.db
    try:
        from gluon import validators
    except ImportError:
        # outside web2py no validators are available
        return []
    field_type, field_length = field.type, field.length
    if isinstance(field_type, SQLCustomType):
        if hasattr(field_type, 'validator'):
            # custom types may carry their own validator
            return field_type.validator
        else:
            field_type = field_type.type
    elif not isinstance(field_type,str):
        return []
    requires=[]
    def ff(r,id):
        # format a referenced record for display: use the table's
        # _format (string template or callable) when available,
        # otherwise fall back to the raw id
        row=r(id)
        if not row:
            return id
        elif hasattr(r, '_format') and isinstance(r._format,str):
            return r._format % row
        elif hasattr(r, '_format') and callable(r._format):
            return r._format(row)
        else:
            return id
    if field_type in (('string', 'text', 'password')):
        requires.append(validators.IS_LENGTH(field_length))
    elif field_type == 'json':
        requires.append(validators.IS_EMPTY_OR(validators.IS_JSON(native_json=field.db._adapter.native_json)))
    elif field_type == 'double' or field_type == 'float':
        requires.append(validators.IS_FLOAT_IN_RANGE(-1e100, 1e100))
    elif field_type in ('integer','bigint'):
        requires.append(validators.IS_INT_IN_RANGE(-1e100, 1e100))
    elif field_type.startswith('decimal'):
        requires.append(validators.IS_DECIMAL_IN_RANGE(-10**10, 10**10))
    elif field_type == 'date':
        requires.append(validators.IS_DATE())
    elif field_type == 'time':
        requires.append(validators.IS_TIME())
    elif field_type == 'datetime':
        requires.append(validators.IS_DATETIME())
    elif db and field_type.startswith('reference') and \
            field_type.find('.') < 0 and \
            field_type[10:] in db.tables:
        # 'reference <table>' pointing at a defined table
        referenced = db[field_type[10:]]
        def repr_ref(id, row=None, r=referenced, f=ff): return f(r, id)
        field.represent = field.represent or repr_ref
        if hasattr(referenced, '_format') and referenced._format:
            # NOTE: replaces the requires list with a single validator
            requires = validators.IS_IN_DB(db,referenced._id,
                                           referenced._format)
            if field.unique:
                requires._and = validators.IS_NOT_IN_DB(db,field)
            if field.tablename == field_type[10:]:
                # self-reference: must be allowed to be empty
                return validators.IS_EMPTY_OR(requires)
            return requires
    elif db and field_type.startswith('list:reference') and \
            field_type.find('.') < 0 and \
            field_type[15:] in db.tables:
        # 'list:reference <table>' pointing at a defined table
        referenced = db[field_type[15:]]
        def list_ref_repr(ids, row=None, r=referenced, f=ff):
            if not ids:
                return None
            refs = None
            db, id = r._db, r._id
            if isinstance(db._adapter, GoogleDatastoreAdapter):
                # GAE limits belongs() to 30 items per query, so the
                # lookup is chunked and the row sets combined
                def count(values): return db(id.belongs(values)).select(id)
                rx = range(0, len(ids), 30)
                refs = reduce(lambda a,b:a&b, [count(ids[i:i+30]) for i in rx])
            else:
                refs = db(id.belongs(ids)).select(id)
            return (refs and ', '.join(f(r,x.id) for x in refs) or '')
        field.represent = field.represent or list_ref_repr
        if hasattr(referenced, '_format') and referenced._format:
            requires = validators.IS_IN_DB(db,referenced._id,
                                           referenced._format,multiple=True)
        else:
            requires = validators.IS_IN_DB(db,referenced._id,
                                           multiple=True)
        if field.unique:
            requires._and = validators.IS_NOT_IN_DB(db,field)
        if not field.notnull:
            requires = validators.IS_EMPTY_OR(requires)
        return requires
    elif field_type.startswith('list:'):
        def repr_list(values,row=None): return', '.join(str(v) for v in (values or []))
        field.represent = field.represent or repr_list
    if field.unique:
        requires.insert(0,validators.IS_NOT_IN_DB(db,field))
    # sff holds the two-letter prefixes of types that may be empty:
    # in(teger), do(uble), da(te...), ti(me), de(cimal), bo(olean)
    sff = ['in', 'do', 'da', 'ti', 'de', 'bo']
    if field.notnull and not field_type[:2] in sff:
        requires.insert(0, validators.IS_NOT_EMPTY())
    elif not field.notnull and field_type[:2] in sff and requires:
        requires[-1] = validators.IS_EMPTY_OR(requires[-1])
    return requires
def bar_escape(item):
    """Escape '|' characters by doubling them, for bar-encoded lists."""
    text = str(item)
    return text.replace('|', '||')
6995
def bar_encode(items):
    """Serialize items as a '|'-delimited string (used for list: fields).

    Empty/whitespace-only items are dropped; a '|' inside an item is
    escaped by doubling it.
    """
    encoded = [str(item).replace('|', '||')
               for item in items if str(item).strip()]
    return '|%s|' % '|'.join(encoded)
6998
def bar_decode_integer(value):
    """Decode a bar-encoded string into a list of (long) integers."""
    # some drivers hand back a file-like blob instead of a string
    if not hasattr(value,'split') and hasattr(value,'read'):
        value = value.read()
    # Python 2 long(): record ids may exceed the native int range
    return [long(x) for x in value.split('|') if x.strip()]
7003
def bar_decode_string(value):
    """Decode a bar-encoded string into a list of strings.

    Strips the outer '|' delimiters, splits on single (unescaped) '|'
    via REGEX_UNPACK, and un-escapes doubled '||' back to '|'.
    """
    return [x.replace('||', '|') for x in
            REGEX_UNPACK.split(value[1:-1]) if x.strip()]
7007
class Row(object):

    """
    a dictionary that lets you do d['a'] as well as d.a
    this is only used to store a Row
    """

    # everything is stored straight in the instance __dict__ so that
    # attribute access and item access address the same data
    __init__ = lambda self,*args,**kwargs: self.__dict__.update(*args,**kwargs)

    def __getitem__(self, k):
        """Look up ``k`` as a column value.

        Resolution order: the '_extra' expression columns (if any), a
        dotted 'table.field' key, the plain attribute, and finally the
        lazy-reference loader (__get_lazy_reference__), re-raising the
        original error if that also fails.
        """
        key=str(k)
        _extra = self.__dict__.get('_extra', None)
        if _extra is not None:
            v = _extra.get(key, DEFAULT)
            if v != DEFAULT:
                return v
        m = REGEX_TABLE_DOT_FIELD.match(key)
        if m:
            # 'table.field' style key: descend into the nested Row
            # first, falling back to the bare field name
            try:
                return ogetattr(self, m.group(1))[m.group(2)]
            except (KeyError,AttributeError,TypeError):
                key = m.group(2)
        try:
            return ogetattr(self, key)
        except (KeyError,AttributeError,TypeError), ae:
            try:
                # last chance: resolve a lazy reference and cache it
                self[key] = ogetattr(self,'__get_lazy_reference__')(key)
                return self[key]
            except:
                raise ae

    __setitem__ = lambda self, key, value: setattr(self, str(key), value)

    __delitem__ = object.__delattr__

    # NOTE(review): shadowed by the def __copy__ further down.
    __copy__ = lambda self: Row(self)

    __call__ = __getitem__

    def get(self, key, default=None):
        """dict-style get() honoring the extended __getitem__ lookup."""
        try:
            return self.__getitem__(key)
        except(KeyError, AttributeError, TypeError):
            return self.__dict__.get(key,default)

    has_key = __contains__ = lambda self, key: key in self.__dict__

    __nonzero__ = lambda self: len(self.__dict__)>0

    update = lambda self, *args, **kwargs: self.__dict__.update(*args, **kwargs)

    keys = lambda self: self.__dict__.keys()

    items = lambda self: self.__dict__.items()

    values = lambda self: self.__dict__.values()

    __iter__ = lambda self: self.__dict__.__iter__()

    iteritems = lambda self: self.__dict__.iteritems()

    __str__ = __repr__ = lambda self: '<Row %s>' % self.as_dict()

    # int()/long() conversion yields the record id
    __int__ = lambda self: object.__getattribute__(self,'id')

    __long__ = lambda self: long(object.__getattribute__(self,'id'))

    __getattr__ = __getitem__

    # def __getattribute__(self, key):
    #     try:
    #         return object.__getattribute__(self, key)
    #     except AttributeError, ae:
    #         try:
    #             return self.__get_lazy_reference__(key)
    #         except:
    #             raise ae

    def __eq__(self,other):
        # Rows compare equal when their dict representations match
        try:
            return self.as_dict() == other.as_dict()
        except AttributeError:
            return False

    def __ne__(self,other):
        return not (self == other)

    def __copy__(self):
        return Row(dict(self))

    def as_dict(self, datetime_to_str=False, custom_types=None):
        """Return a plain dict copy of the row, recursing into nested
        Rows.

        References become longs, Decimals floats, date/time values
        (optionally) ISO strings; values whose type is not in
        SERIALIZABLE_TYPES are dropped.
        """
        SERIALIZABLE_TYPES = [str, unicode, int, long, float, bool, list, dict]
        if isinstance(custom_types,(list,tuple,set)):
            SERIALIZABLE_TYPES += custom_types
        elif custom_types:
            SERIALIZABLE_TYPES.append(custom_types)
        d = dict(self)
        for k in copy.copy(d.keys()):
            v=d[k]
            if d[k] is None:
                continue
            elif isinstance(v,Row):
                d[k]=v.as_dict()
            elif isinstance(v,Reference):
                d[k]=long(v)
            elif isinstance(v,decimal.Decimal):
                d[k]=float(v)
            elif isinstance(v, (datetime.date, datetime.datetime, datetime.time)):
                if datetime_to_str:
                    d[k] = v.isoformat().replace('T',' ')[:19]
            elif not isinstance(v,tuple(SERIALIZABLE_TYPES)):
                del d[k]
        return d

    def as_xml(self, row_name="row", colnames=None, indent='  '):
        """Serialize the row as an XML fragment rooted at ``row_name``."""
        def f(row,field,indent='  '):
            if isinstance(row,Row):
                spc = indent+'  \n'
                items = [f(row[x],x,indent+'  ') for x in row]
                return '%s<%s>\n%s\n%s</%s>' % (
                    indent,
                    field,
                    spc.join(item for item in items if item),
                    indent,
                    field)
            elif not callable(row):
                if REGEX_ALPHANUMERIC.match(field):
                    return '%s<%s>%s</%s>' % (indent,field,row,field)
                else:
                    # non-identifier column names go in an <extra> element
                    return '%s<extra name="%s">%s</extra>' % \
                        (indent,field,row)
            else:
                return None
        return f(self, row_name, indent=indent)

    def as_json(self, mode="object", default=None, colnames=None,
                serialize=True, **kwargs):
        """
        serializes the row to a JSON object
        kwargs are passed to .as_dict method
        only "object" mode supported

        serialize = False used by Rows.as_json
        TODO: return array mode with query column order

        mode and colnames are not implemented
        """

        item = self.as_dict(**kwargs)
        if serialize:
            if have_serializers:
                return serializers.json(item,
                                        default=default or
                                        serializers.custom_json)
            elif simplejson:
                return simplejson.dumps(item)
            else:
                raise RuntimeError("missing simplejson")
        else:
            return item
7170
7171 7172 ################################################################################ 7173 # Everything below should be independent of the specifics of the database 7174 # and should work for RDBMs and some NoSQL databases 7175 ################################################################################ 7176 7177 -class SQLCallableList(list):
7178 - def __call__(self):
7179 return copy.copy(self)
7180
def smart_query(fields,text):
    """Parse a natural-language-ish query string into a DAL query.

    ``fields`` may be Field or Table instances (a table contributes all
    of its fields).  ``text`` supports quoted constants, and/or/not
    logic and a tolerant set of comparison spellings ("equal to", ">=",
    "contains", "in", ...).  Raises RuntimeError on invalid syntax or
    unsupported operations.
    """
    if not isinstance(fields,(list,tuple)):
        fields = [fields]
    new_fields = []
    for field in fields:
        if isinstance(field,Field):
            new_fields.append(field)
        elif isinstance(field,Table):
            for ofield in field:
                new_fields.append(ofield)
        else:
            raise RuntimeError("fields must be a list of fields")
    fields = new_fields
    # map both 'name' and 'table.name' (lowercased) to the Field
    field_map = {}
    for field in fields:
        n = field.name.lower()
        if not n in field_map:
            field_map[n] = field
        n = str(field).lower()
        if not n in field_map:
            field_map[n] = field
    # replace quoted constants with #<i> placeholders so spaces inside
    # them do not confuse the whitespace tokenizer below
    constants = {}
    i = 0
    while True:
        m = REGEX_CONST_STRING.search(text)
        if not m: break
        text = text[:m.start()]+('#%i' % i)+text[m.end():]
        constants[str(i)] = m.group()[1:-1]
        i+=1
    text = re.sub('\s+',' ',text).lower()
    # normalize every supported operator spelling to a canonical token
    for a,b in [('&','and'),
                ('|','or'),
                ('~','not'),
                ('==','='),
                ('<','<'),
                ('>','>'),
                ('<=','<='),
                ('>=','>='),
                ('<>','!='),
                ('=<','<='),
                ('=>','>='),
                ('=','='),
                (' less or equal than ','<='),
                (' greater or equal than ','>='),
                (' equal or less than ','<='),
                (' equal or greater than ','>='),
                (' less or equal ','<='),
                (' greater or equal ','>='),
                (' equal or less ','<='),
                (' equal or greater ','>='),
                (' not equal to ','!='),
                (' not equal ','!='),
                (' equal to ','='),
                (' equal ','='),
                (' equals ','='),
                (' less than ','<'),
                (' greater than ','>'),
                (' starts with ','startswith'),
                (' ends with ','endswith'),
                (' not in ' , 'notbelongs'),
                (' in ' , 'belongs'),
                (' is ','=')]:
        if a[0]==' ':
            text = text.replace(' is'+a,' %s ' % b)
        text = text.replace(a,' %s ' % b)
    text = re.sub('\s+',' ',text).lower()
    # glue split comparison operators back together (e.g. '< =' -> '<=')
    text = re.sub('(?P<a>[\<\>\!\=])\s+(?P<b>[\<\>\!\=])','\g<a>\g<b>',text)
    # simple state machine: expect a field, then an operator, then a value
    query = field = neg = op = logic = None
    for item in text.split():
        if field is None:
            if item == 'not':
                neg = True
            elif not neg and not logic and item in ('and','or'):
                logic = item
            elif item in field_map:
                field = field_map[item]
            else:
                raise RuntimeError("Invalid syntax")
        elif not field is None and op is None:
            op = item
        elif not op is None:
            if item.startswith('#'):
                # restore the quoted constant hidden earlier
                if not item[1:] in constants:
                    raise RuntimeError("Invalid syntax")
                value = constants[item[1:]]
            else:
                value = item
                if field.type in ('text', 'string', 'json'):
                    # unquoted '=' on text fields means a LIKE match
                    if op == '=': op = 'like'
            if op == '=': new_query = field==value
            elif op == '<': new_query = field<value
            elif op == '>': new_query = field>value
            elif op == '<=': new_query = field<=value
            elif op == '>=': new_query = field>=value
            elif op == '!=': new_query = field!=value
            elif op == 'belongs': new_query = field.belongs(value.split(','))
            elif op == 'notbelongs': new_query = ~field.belongs(value.split(','))
            elif field.type in ('text', 'string', 'json'):
                if op == 'contains': new_query = field.contains(value)
                elif op == 'like': new_query = field.like(value)
                elif op == 'startswith': new_query = field.startswith(value)
                elif op == 'endswith': new_query = field.endswith(value)
                else: raise RuntimeError("Invalid operation")
            elif field._db._adapter.dbengine=='google:datastore' and \
                 field.type in ('list:integer', 'list:string', 'list:reference'):
                if op == 'contains': new_query = field.contains(value)
                else: raise RuntimeError("Invalid operation")
            else: raise RuntimeError("Invalid operation")
            if neg: new_query = ~new_query
            # combine with the running query using the pending logic op
            if query is None:
                query = new_query
            elif logic == 'and':
                query &= new_query
            elif logic == 'or':
                query |= new_query
            field = op = neg = logic = None
    return query
7298
7299 -class DAL(object):
7300 7301 """ 7302 an instance of this class represents a database connection 7303 7304 Example:: 7305 7306 db = DAL('sqlite://test.db') 7307 7308 or 7309 7310 db = DAL(**{"uri": ..., "tables": [...]...}) # experimental 7311 7312 db.define_table('tablename', Field('fieldname1'), 7313 Field('fieldname2')) 7314 """ 7315
    def __new__(cls, uri='sqlite://dummy.db', *args, **kwargs):
        # DAL instances are pooled per thread, keyed by a hash of the
        # uri (db_uid).  The special uri '<zombie>' retrieves an already
        # existing instance (or registers a placeholder) without opening
        # a new connection.
        if not hasattr(THREAD_LOCAL,'db_instances'):
            THREAD_LOCAL.db_instances = {}
        if not hasattr(THREAD_LOCAL,'db_instances_zombie'):
            THREAD_LOCAL.db_instances_zombie = {}
        if uri == '<zombie>':
            db_uid = kwargs['db_uid'] # a zombie must have a db_uid!
            if db_uid in THREAD_LOCAL.db_instances:
                db_group = THREAD_LOCAL.db_instances[db_uid]
                db = db_group[-1]
            elif db_uid in THREAD_LOCAL.db_instances_zombie:
                db = THREAD_LOCAL.db_instances_zombie[db_uid]
            else:
                db = super(DAL, cls).__new__(cls)
                THREAD_LOCAL.db_instances_zombie[db_uid] = db
        else:
            db_uid = kwargs.get('db_uid',hashlib_md5(repr(uri)).hexdigest())
            if db_uid in THREAD_LOCAL.db_instances_zombie:
                # promote the zombie placeholder to a real instance
                db = THREAD_LOCAL.db_instances_zombie[db_uid]
                del THREAD_LOCAL.db_instances_zombie[db_uid]
            else:
                db = super(DAL, cls).__new__(cls)
            db_group = THREAD_LOCAL.db_instances.get(db_uid,[])
            db_group.append(db)
            THREAD_LOCAL.db_instances[db_uid] = db_group
        db._db_uid = db_uid
        return db
    @staticmethod
    def set_folder(folder):
        """
        Sets the folder where migration .table files are stored, for
        all adapters on this thread.

        # ## this allows gluon to set a folder for this thread
        # ## <<<<<<<<< Should go away as new DAL replaces old sql.py
        """
        BaseAdapter.set_folder(folder)
    @staticmethod
    def get_instances():
        """
        Returns a dictionary with uri as key with timings and defined tables
        {'sqlite://storage.sqlite': {
            'dbstats': [(select auth_user.email from auth_user, 0.02009)],
            'dbtables': {
                'defined': ['auth_cas', 'auth_event', 'auth_group',
                    'auth_membership', 'auth_permission', 'auth_user'],
                'lazy': '[]'
                }
            }
        }
        """
        dbs = getattr(THREAD_LOCAL,'db_instances',{}).items()
        infos = {}
        for db_uid, db_group in dbs:
            for db in db_group:
                if not db._uri:
                    continue
                # never expose credentials in the reported uri
                k = hide_password(db._uri)
                infos[k] = dict(dbstats = [(row[0], row[1]) for row in db._timings],
                                dbtables = {'defined':
                                    sorted(list(set(db.tables) -
                                                set(db._LAZY_TABLES.keys()))),
                                            'lazy': sorted(db._LAZY_TABLES.keys())}
                                )
        return infos
7380 7381 @staticmethod
7382 - def distributed_transaction_begin(*instances):
7383 if not instances: 7384 return 7385 thread_key = '%s.%s' % (socket.gethostname(), threading.currentThread()) 7386 keys = ['%s.%i' % (thread_key, i) for (i,db) in instances] 7387 instances = enumerate(instances) 7388 for (i, db) in instances: 7389 if not db._adapter.support_distributed_transaction(): 7390 raise SyntaxError( 7391 'distributed transaction not suported by %s' % db._dbname) 7392 for (i, db) in instances: 7393 db._adapter.distributed_transaction_begin(keys[i])
7394 7395 @staticmethod
7396 - def distributed_transaction_commit(*instances):
7397 if not instances: 7398 return 7399 instances = enumerate(instances) 7400 thread_key = '%s.%s' % (socket.gethostname(), threading.currentThread()) 7401 keys = ['%s.%i' % (thread_key, i) for (i,db) in instances] 7402 for (i, db) in instances: 7403 if not db._adapter.support_distributed_transaction(): 7404 raise SyntaxError( 7405 'distributed transaction not suported by %s' % db._dbanme) 7406 try: 7407 for (i, db) in instances: 7408 db._adapter.prepare(keys[i]) 7409 except: 7410 for (i, db) in instances: 7411 db._adapter.rollback_prepared(keys[i]) 7412 raise RuntimeError('failure to commit distributed transaction') 7413 else: 7414 for (i, db) in instances: 7415 db._adapter.commit_prepared(keys[i]) 7416 return
7417
def __init__(self, uri=DEFAULT_URI,
             pool_size=0, folder=None,
             db_codec='UTF-8', check_reserved=None,
             migrate=True, fake_migrate=False,
             migrate_enabled=True, fake_migrate_all=False,
             decode_credentials=False, driver_args=None,
             adapter_args=None, attempts=5, auto_import=False,
             bigint_id=False, debug=False, lazy_tables=False,
             db_uid=None, do_connect=True,
             after_connection=None, tables=None):
    """
    Creates a new Database Abstraction Layer instance.

    Keyword arguments:

    :uri: string that contains information for connecting to a database.
        (default: 'sqlite://dummy.db')

        experimental: you can specify a dictionary as uri
        parameter i.e. with
        db = DAL({"uri": "sqlite://storage.sqlite",
                  "tables": {...}, ...})

        for an example of dict input you can check the output
        of the scaffolding db model with

        db.as_dict()

        Note that for compatibility with Python older than
        version 2.6.5 you should cast your dict input keys
        to str due to a syntax limitation on kwarg names.
        for proper DAL dictionary input you can use one of:

        obj = serializers.cast_keys(dict, [encoding="utf-8"])

        or else (for parsing json input)

        obj = serializers.loads_json(data, unicode_keys=False)

    :pool_size: How many open connections to make to the database object.
    :folder: where .table files will be created.
        automatically set within web2py
        use an explicit path when using DAL outside web2py
    :db_codec: string encoding of the database (default: 'UTF-8')
    :check_reserved: list of adapters to check tablenames and column names
        against sql/nosql reserved keywords. (Default None)

        * 'common' List of sql keywords that are common to all database
          types such as "SELECT, INSERT". (recommended)
        * 'all' Checks against all known SQL keywords. (not recommended)
        * '<adaptername>' Checks against the specific adapters list of
          keywords. (recommended)
        * '<adaptername>_nonreserved' Checks against the specific adapters
          list of nonreserved keywords. (if available)
    :migrate: (defaults to True) sets default migrate behavior for all tables
    :fake_migrate: (defaults to False) sets default fake_migrate behavior
        for all tables
    :migrate_enabled: (defaults to True). If set to False disables ALL
        migrations
    :fake_migrate_all: (defaults to False). If set to True fake migrates
        ALL tables
    :attempts: (defaults to 5). Number of times to attempt connecting
    :auto_import: (defaults to False). If set, import automatically table
        definitions from the databases folder
    :bigint_id: (defaults to False): If set, turn on bigint instead of int
        for id fields
    :lazy_tables: (defaults to False): delay table definition until table
        access
    :after_connection: (defaults to None): a callable that will be executed
        after the connection
    """
    # zombie instances are re-hydrated by DAL_unpickler and carry no
    # connection state: skip all setup
    if uri == '<zombie>' and db_uid is not None: return
    if not decode_credentials:
        credential_decoder = lambda cred: cred
    else:
        credential_decoder = lambda cred: urllib.unquote(cred)
    self._folder = folder
    if folder:
        self.set_folder(folder)
    self._uri = uri
    self._pool_size = pool_size
    self._db_codec = db_codec
    self._lastsql = ''
    self._timings = []           # (sql, seconds) pairs, see get_instances()
    self._pending_references = {}
    self._request_tenant = 'request_tenant'
    self._common_fields = []
    self._referee_name = '%(table)s'
    self._bigint_id = bigint_id
    self._debug = debug
    self._migrated = []
    self._LAZY_TABLES = {}       # tablename -> (tablename, fields, args)
    self._lazy_tables = lazy_tables
    self._tables = SQLCallableList()
    self._driver_args = driver_args
    self._adapter_args = adapter_args
    self._check_reserved = check_reserved
    self._decode_credentials = decode_credentials
    self._attempts = attempts
    self._do_connect = do_connect

    # invalid attempt counts fall back to the default of 5
    # NOTE(review): attempts == 0 passes this check; the retry loop then
    # never runs and the final RuntimeError would reference an unbound
    # 'tb' -- confirm callers never pass 0
    if not str(attempts).isdigit() or attempts < 0:
        attempts = 5
    if uri:
        # a tuple/list of uris is tried in order on every attempt
        uris = isinstance(uri,(list,tuple)) and uri or [uri]
        error = ''
        # NOTE(review): 'error' is never used; 'tb' carries the last traceback
        connected = False
        for k in range(attempts):
            for uri in uris:
                try:
                    if is_jdbc and not uri.startswith('jdbc:'):
                        uri = 'jdbc:'+uri
                    self._dbname = REGEX_DBNAME.match(uri).group()
                    if not self._dbname in ADAPTERS:
                        raise SyntaxError("Error in URI '%s' or database not supported" % self._dbname)
                    # notice that driver args or {} else driver_args
                    # defaults to {} global, not correct
                    kwargs = dict(db=self,uri=uri,
                                  pool_size=pool_size,
                                  folder=folder,
                                  db_codec=db_codec,
                                  credential_decoder=credential_decoder,
                                  driver_args=driver_args or {},
                                  adapter_args=adapter_args or {},
                                  do_connect=do_connect,
                                  after_connection=after_connection)
                    self._adapter = ADAPTERS[self._dbname](**kwargs)
                    types = ADAPTERS[self._dbname].types
                    # copy so multiple DAL() possible
                    self._adapter.types = copy.copy(types)
                    self._adapter.build_parsemap()
                    if bigint_id:
                        if 'big-id' in types and 'reference' in types:
                            self._adapter.types['id'] = types['big-id']
                            self._adapter.types['reference'] = types['big-reference']
                    connected = True
                    break
                except SyntaxError:
                    # configuration errors are fatal, never retried
                    raise
                except Exception:
                    tb = traceback.format_exc()
                    sys.stderr.write('DEBUG: connect attempt %i, connection error:\n%s' % (k, tb))
            if connected:
                break
            else:
                time.sleep(1)
        if not connected:
            raise RuntimeError("Failure to connect, tried %d times:\n%s" % (attempts, tb))
    else:
        # no uri: dummy adapter, migrations are meaningless
        self._adapter = BaseAdapter(db=self,pool_size=0,
                                    uri='None',folder=folder,
                                    db_codec=db_codec, after_connection=after_connection)
        migrate = fake_migrate = False
    adapter = self._adapter
    self._uri_hash = hashlib_md5(adapter.uri).hexdigest()
    self.check_reserved = check_reserved
    if self.check_reserved:
        from reserved_sql_keywords import ADAPTERS as RSK
        self.RSK = RSK
    self._migrate = migrate
    self._fake_migrate = fake_migrate
    self._migrate_enabled = migrate_enabled
    self._fake_migrate_all = fake_migrate_all
    if auto_import or tables:
        self.import_table_definitions(adapter.folder,
                                      tables=tables)
@property
def tables(self):
    # read-only accessor for the list of defined table names
    # (a SQLCallableList, created in __init__)
    return self._tables
7582
def import_table_definitions(self, path, migrate=False,
                             fake_migrate=False, tables=None):
    """
    Defines tables either from an explicit list of table dicts
    (``tables``) or by loading the pickled ``*.table`` migration files
    found under ``path`` for this connection's uri hash.

    :path: folder containing the .table files
    :migrate: migrate flag passed to define_table for loaded tables
    :fake_migrate: fake_migrate flag passed to define_table
    :tables: optional list of dicts, each unpacked into define_table(**d)
    """
    pattern = pjoin(path,self._uri_hash+'_*.table')
    if tables:
        for table in tables:
            self.define_table(**table)
    else:
        for filename in glob.glob(pattern):
            tfile = self._adapter.file_open(filename, 'r')
            try:
                sql_fields = pickle.load(tfile)
                # strip "<hash>_" prefix and ".table" suffix to recover
                # the table name from the filename
                name = filename[len(pattern)-7:-6]
                # pair each Field with its 'sortable' rank so the
                # original field order can be restored below
                mf = [(value['sortable'],
                       Field(key,
                             type=value['type'],
                             length=value.get('length',None),
                             notnull=value.get('notnull',False),
                             unique=value.get('unique',False))) \
                      for key, value in sql_fields.iteritems()]
                mf.sort(lambda a,b: cmp(a[0],b[0]))
                self.define_table(name,*[item[1] for item in mf],
                                  **dict(migrate=migrate,
                                         fake_migrate=fake_migrate))
            finally:
                self._adapter.file_close(tfile)
7608
def check_reserved_keyword(self, name):
    """
    Validates ``name`` against SQL/NOSQL reserved keywords.

    self.check_reserved selects which keyword lists to consult, e.g.
    ['common', 'postgres', 'mysql'] or ['all']; self.RSK maps each
    backend name to its keyword set.

    :raises SyntaxError: on the first backend whose keyword list
        contains the uppercased name.
    """
    uppercased = name.upper()
    for backend in self.check_reserved:
        if uppercased in self.RSK[backend]:
            msg = 'invalid table/column name "%s" is a "%s" reserved SQL/NOSQL keyword'
            raise SyntaxError(msg % (name, backend.upper()))
7623
def parse_as_rest(self,patterns,args,vars,queries=None,nested_select=True):
    """
    Maps a RESTful URL (args + vars) onto database queries according to
    a list of URL patterns, returning a Row with status / response /
    error / pattern keys.

    EXAMPLE:

    db.define_table('person',Field('name'),Field('info'))
    db.define_table('pet',Field('ownedby',db.person),Field('name'),Field('info'))

    @request.restful()
    def index():
        def GET(*args,**vars):
            patterns = [
                "/friends[person]",
                "/{person.name}/:field",
                "/{person.name}/pets[pet.ownedby]",
                "/{person.name}/pets[pet.ownedby]/{pet.name}",
                "/{person.name}/pets[pet.ownedby]/{pet.name}/:field",
                ("/dogs[pet]", db.pet.info=='dog'),
                ("/dogs[pet]/{pet.name.startswith}", db.pet.info=='dog'),
                ]
            parser = db.parse_as_rest(patterns,args,vars)
            if parser.status == 200:
                return dict(content=parser.response)
            else:
                raise HTTP(parser.status,parser.error)

        def POST(table_name,**vars):
            if table_name == 'person':
                return db.person.validate_and_insert(**vars)
            elif table_name == 'pet':
                return db.pet.validate_and_insert(**vars)
            else:
                raise HTTP(400)
        return locals()
    """

    db = self
    re1 = REGEX_SEARCH_PATTERN     # matches "{table.field...}" tags
    re2 = REGEX_SQUARE_BRACKETS    # matches "name[table.field]" tags

    def auto_table(table,base='',depth=0):
        # generates URL patterns for every readable field of `table`,
        # choosing the lookup operators appropriate to the field type;
        # recurses (up to `depth`) into tables that reference it
        patterns = []
        for field in db[table].fields:
            if base:
                tag = '%s/%s' % (base,field.replace('_','-'))
            else:
                tag = '/%s/%s' % (table.replace('_','-'),field.replace('_','-'))
            f = db[table][field]
            if not f.readable: continue
            if f.type=='id' or 'slug' in field or f.type.startswith('reference'):
                tag += '/{%s.%s}' % (table,field)
                patterns.append(tag)
                patterns.append(tag+'/:field')
            elif f.type.startswith('boolean'):
                tag += '/{%s.%s}' % (table,field)
                patterns.append(tag)
                patterns.append(tag+'/:field')
            elif f.type in ('float','double','integer','bigint'):
                # numeric fields are exposed as a half-open range lookup
                tag += '/{%s.%s.ge}/{%s.%s.lt}' % (table,field,table,field)
                patterns.append(tag)
                patterns.append(tag+'/:field')
            elif f.type.startswith('list:'):
                tag += '/{%s.%s.contains}' % (table,field)
                patterns.append(tag)
                patterns.append(tag+'/:field')
            elif f.type in ('date','datetime'):
                tag+= '/{%s.%s.year}' % (table,field)
                patterns.append(tag)
                patterns.append(tag+'/:field')
                tag+='/{%s.%s.month}' % (table,field)
                patterns.append(tag)
                patterns.append(tag+'/:field')
                tag+='/{%s.%s.day}' % (table,field)
                patterns.append(tag)
                patterns.append(tag+'/:field')
            if f.type in ('datetime','time'):
                tag+= '/{%s.%s.hour}' % (table,field)
                patterns.append(tag)
                patterns.append(tag+'/:field')
                tag+='/{%s.%s.minute}' % (table,field)
                patterns.append(tag)
                patterns.append(tag+'/:field')
                tag+='/{%s.%s.second}' % (table,field)
                patterns.append(tag)
                patterns.append(tag+'/:field')
            if depth>0:
                for f in db[table]._referenced_by:
                    tag+='/%s[%s.%s]' % (table,f.tablename,f.name)
                    patterns.append(tag)
                    patterns += auto_table(table,base=tag,depth=depth-1)
        return patterns

    if patterns == 'auto':
        # build patterns for every non-auth table automatically
        patterns=[]
        for table in db.tables:
            if not table.startswith('auth_'):
                patterns.append('/%s[%s]' % (table,table))
                patterns += auto_table(table,base='',depth=1)
    else:
        # expand any ":auto[...]" terminal tokens in-place
        i = 0
        while i<len(patterns):
            pattern = patterns[i]
            if not isinstance(pattern,str):
                pattern = pattern[0]
            tokens = pattern.split('/')
            if tokens[-1].startswith(':auto') and re2.match(tokens[-1]):
                new_patterns = auto_table(tokens[-1][tokens[-1].find('[')+1:-1],
                                          '/'.join(tokens[:-1]))
                patterns = patterns[:i]+new_patterns+patterns[i+1:]
                i += len(new_patterns)
            else:
                i += 1
    if '/'.join(args) == 'patterns':
        # introspection endpoint: list the effective patterns
        return Row({'status':200,'pattern':'list',
                    'error':None,'response':patterns})
    for pattern in patterns:
        basequery, exposedfields = None, []
        # a pattern may be (pattern, basequery[, exposedfields])
        if isinstance(pattern,tuple):
            if len(pattern)==2:
                pattern, basequery = pattern
            elif len(pattern)>2:
                pattern, basequery, exposedfields = pattern[0:3]
        otable=table=None
        if not isinstance(queries,dict):
            dbset=db(queries)
            if basequery is not None:
                dbset = dbset(basequery)
        i=0
        tags = pattern[1:].split('/')
        # a pattern only matches when it has as many segments as the URL
        if len(tags)!=len(args):
            continue
        for tag in tags:
            if re1.match(tag):
                # "{table.field.op}" tag: add a filter on dbset
                tokens = tag[1:-1].split('.')
                table, field = tokens[0], tokens[1]
                if not otable or table == otable:
                    if len(tokens)==2 or tokens[2]=='eq':
                        query = db[table][field]==args[i]
                    elif tokens[2]=='ne':
                        query = db[table][field]!=args[i]
                    elif tokens[2]=='lt':
                        query = db[table][field]<args[i]
                    elif tokens[2]=='gt':
                        query = db[table][field]>args[i]
                    elif tokens[2]=='ge':
                        query = db[table][field]>=args[i]
                    elif tokens[2]=='le':
                        query = db[table][field]<=args[i]
                    elif tokens[2]=='year':
                        query = db[table][field].year()==args[i]
                    elif tokens[2]=='month':
                        query = db[table][field].month()==args[i]
                    elif tokens[2]=='day':
                        query = db[table][field].day()==args[i]
                    elif tokens[2]=='hour':
                        query = db[table][field].hour()==args[i]
                    elif tokens[2]=='minute':
                        query = db[table][field].minutes()==args[i]
                    elif tokens[2]=='second':
                        query = db[table][field].seconds()==args[i]
                    elif tokens[2]=='startswith':
                        query = db[table][field].startswith(args[i])
                    elif tokens[2]=='contains':
                        query = db[table][field].contains(args[i])
                    else:
                        raise RuntimeError("invalid pattern: %s" % pattern)
                    # optional 4th token negates the query
                    if len(tokens)==4 and tokens[3]=='not':
                        query = ~query
                    elif len(tokens)>=4:
                        raise RuntimeError("invalid pattern: %s" % pattern)
                    if not otable and isinstance(queries,dict):
                        dbset = db(queries[table])
                        if basequery is not None:
                            dbset = dbset(basequery)
                    dbset=dbset(query)
                else:
                    raise RuntimeError("missing relation in pattern: %s" % pattern)
            elif re2.match(tag) and args[i]==tag[:tag.find('[')]:
                # "name[table.field]" tag: follow a relation
                ref = tag[tag.find('[')+1:-1]
                if '.' in ref and otable:
                    table,field = ref.split('.')
                    selfld = '_id'
                    if db[table][field].type.startswith('reference '):
                        refs = [ x.name for x in db[otable] if x.type == db[table][field].type ]
                    else:
                        refs = [ x.name for x in db[table]._referenced_by if x.tablename==otable ]
                    if refs:
                        selfld = refs[0]
                    if nested_select:
                        # push the join into the database as a subselect
                        try:
                            dbset=db(db[table][field].belongs(dbset._select(db[otable][selfld])))
                        except ValueError:
                            return Row({'status':400,'pattern':pattern,
                                        'error':'invalid path','response':None})
                    else:
                        # fetch ids in Python and use an IN list instead
                        items = [item.id for item in dbset.select(db[otable][selfld])]
                        dbset=db(db[table][field].belongs(items))
                else:
                    table = ref
                    if not otable and isinstance(queries,dict):
                        dbset = db(queries[table])
                    dbset=dbset(db[table])
            elif tag==':field' and table:
                # ":field" tag: project a single column of the matches
                field = args[i]
                if not field in db[table]: break
                # hand-built patterns should respect .readable=False as well
                if not db[table][field].readable:
                    return Row({'status':418,'pattern':pattern,
                                'error':'I\'m a teapot','response':None})
                try:
                    distinct = vars.get('distinct', False) == 'True'
                    offset = long(vars.get('offset',None) or 0)
                    limits = (offset,long(vars.get('limit',None) or 1000)+offset)
                except ValueError:
                    return Row({'status':400,'error':'invalid limits','response':None})
                items = dbset.select(db[table][field], distinct=distinct, limitby=limits)
                if items:
                    return Row({'status':200,'response':items,
                                'pattern':pattern})
                else:
                    return Row({'status':404,'pattern':pattern,
                                'error':'no record found','response':None})
            elif tag != args[i]:
                # literal tag that does not match this URL segment
                break
            otable = table
            i += 1
            if i==len(tags) and table:
                # whole pattern consumed: return the selected records
                ofields = vars.get('order',db[table]._id.name).split('|')
                try:
                    orderby = [db[table][f] if not f.startswith('~') else ~db[table][f[1:]] for f in ofields]
                except (KeyError, AttributeError):
                    return Row({'status':400,'error':'invalid orderby','response':None})
                if exposedfields:
                    fields = [field for field in db[table] if str(field).split('.')[-1] in exposedfields and field.readable]
                else:
                    fields = [field for field in db[table] if field.readable]
                count = dbset.count()
                try:
                    offset = long(vars.get('offset',None) or 0)
                    limits = (offset,long(vars.get('limit',None) or 1000)+offset)
                except ValueError:
                    return Row({'status':400,'error':'invalid limits','response':None})
                if count > limits[1]-limits[0]:
                    return Row({'status':400,'error':'too many records','response':None})
                try:
                    response = dbset.select(limitby=limits,orderby=orderby,*fields)
                except ValueError:
                    return Row({'status':400,'pattern':pattern,
                                'error':'invalid path','response':None})
                return Row({'status':200,'response':response,
                            'pattern':pattern,'count':count})
    return Row({'status':400,'error':'no matching pattern','response':None})
7877
def define_table(
    self,
    tablename,
    *fields,
    **args
    ):
    """
    Defines (or lazily registers) a table on this DAL instance and
    returns the Table object (or None when the definition is deferred
    by lazy_tables).

    :tablename: str (or coercible unicode) table name
    :fields: Field instances; may alternatively be passed as
        args['fields']
    :args: table options (must be a subset of TABLE_ARGS), e.g.
        migrate, redefine, format, ...
    :raises SyntaxError: on invalid/duplicate names or invalid args
    """
    if not fields and 'fields' in args:
        fields = args.get('fields',())
    if not isinstance(tablename, str):
        if isinstance(tablename, unicode):
            try:
                tablename = str(tablename)
            except UnicodeEncodeError:
                raise SyntaxError("invalid unicode table name")
        else:
            raise SyntaxError("missing table name")
    elif hasattr(self,tablename) or tablename in self.tables:
        # already defined: only allowed with redefine=True
        if not args.get('redefine',False):
            raise SyntaxError('table already defined: %s' % tablename)
    elif tablename.startswith('_') or hasattr(self,tablename) or \
            REGEX_PYTHON_KEYWORDS.match(tablename):
        raise SyntaxError('invalid table name: %s' % tablename)
    elif self.check_reserved:
        self.check_reserved_keyword(tablename)
    else:
        # NOTE: because this is an elif chain, the TABLE_ARGS validation
        # below only runs when check_reserved is not set
        invalid_args = set(args)-TABLE_ARGS
        if invalid_args:
            raise SyntaxError('invalid table "%s" attributes: %s' \
                % (tablename,invalid_args))
    if self._lazy_tables and not tablename in self._LAZY_TABLES:
        # defer the actual definition until first attribute access
        # (see __getattr__)
        self._LAZY_TABLES[tablename] = (tablename,fields,args)
        table = None
    else:
        table = self.lazy_define_table(tablename,*fields,**args)
    if not tablename in self.tables:
        self.tables.append(tablename)
    return table
7915
def lazy_define_table(
    self,
    tablename,
    *fields,
    **args
    ):
    """
    Actually builds the Table object (including common fields, default
    validators, back-references) and, when migrations apply, creates or
    alters the underlying database table.

    Called by define_table directly, or deferred via __getattr__ when
    lazy_tables is enabled.
    """
    args_get = args.get
    common_fields = self._common_fields
    if common_fields:
        fields = list(fields) + list(common_fields)

    table_class = args_get('table_class',Table)
    table = table_class(self, tablename, *fields, **args)
    table._actual = True
    self[tablename] = table
    # must follow above line to handle self references
    table._create_references()
    for field in table:
        if field.requires == DEFAULT:
            field.requires = sqlhtml_validators(field)

    migrate = self._migrate_enabled and args_get('migrate',self._migrate)
    if migrate and not self._uri in (None,'None') \
            or self._adapter.dbengine=='google:datastore':
        fake_migrate = self._fake_migrate_all or \
            args_get('fake_migrate',self._fake_migrate)
        polymodel = args_get('polymodel',None)
        try:
            # serialize DDL across threads
            GLOBAL_LOCKER.acquire()
            self._lastsql = self._adapter.create_table(
                table,migrate=migrate,
                fake_migrate=fake_migrate,
                polymodel=polymodel)
        finally:
            GLOBAL_LOCKER.release()
    else:
        table._dbt = None
    on_define = args_get('on_define',None)
    if on_define: on_define(table)
    return table
7956
def as_dict(self, flat=False, sanitize=True):
    """
    Serializes this DAL instance (connection settings plus table
    definitions) into a plain dictionary.

    :flat: forwarded to each table's as_dict
    :sanitize: when True (default), uri and db_uid are replaced by None
        so credentials are not leaked
    """
    attr_names = ('pool_size', 'folder', 'db_codec',
                  'check_reserved', 'migrate', 'fake_migrate',
                  'migrate_enabled', 'fake_migrate_all',
                  'decode_credentials', 'driver_args',
                  'adapter_args', 'attempts',
                  'bigint_id', 'debug', 'lazy_tables',
                  'do_connect')
    result = dict((k, getattr(self, '_' + k, None)) for k in attr_names)
    if sanitize:
        result.update(uri=None, db_uid=None)
    else:
        result.update(uri=self._uri, db_uid=self._db_uid)
    result['tables'] = [t.as_dict(flat=flat, sanitize=sanitize)
                        for t in self]
    return result
7974
def as_xml(self, sanitize=True):
    """Return an XML serialization of self.as_dict(flat=True)."""
    if not have_serializers:
        raise ImportError("No xml serializers available")
    payload = self.as_dict(flat=True, sanitize=sanitize)
    return serializers.xml(payload)
7980
def as_json(self, sanitize=True):
    """Return a JSON serialization of self.as_dict(flat=True)."""
    if not have_serializers:
        raise ImportError("No json serializers available")
    payload = self.as_dict(flat=True, sanitize=sanitize)
    return serializers.json(payload)
7986
def as_yaml(self, sanitize=True):
    """Return a YAML serialization of self.as_dict(flat=True)."""
    if not have_serializers:
        raise ImportError("No YAML serializers available")
    payload = self.as_dict(flat=True, sanitize=sanitize)
    return serializers.yaml(payload)
7992
def __contains__(self, tablename):
    """Membership test: ``name in db`` is true for defined tables."""
    if not hasattr(self, 'tables'):
        # instance still under construction: no .tables attribute yet
        return False
    return tablename in self.tables
# Python-2 dict-style alias: db.has_key(name) == (name in db)
has_key = __contains__
def get(self, key, default=None):
    """
    dict-like attribute access: returns the instance attribute named
    `key` or `default` (looks only in __dict__, no __getattr__ fallback).
    """
    try:
        return self.__dict__[key]
    except KeyError:
        return default
8004
def __iter__(self):
    """Yield each defined Table object, in definition order."""
    for name in self.tables:
        yield self[name]
8008
def __getitem__(self, key):
    """Dictionary-style table access: db['name'] == db.name."""
    name = str(key)
    return self.__getattr__(name)
8011
def __getattr__(self, key):
    """
    Attribute resolution hook; a pending lazily-defined table is
    materialized (via lazy_define_table) on first access.
    """
    lazy_pending = ogetattr(self, '_lazy_tables') and \
        key in ogetattr(self, '_LAZY_TABLES')
    if lazy_pending:
        tablename, fields, args = self._LAZY_TABLES.pop(key)
        return self.lazy_define_table(tablename, *fields, **args)
    return ogetattr(self, key)
8018
def __setitem__(self, key, value):
    """Dictionary-style assignment, routed through object.__setattr__."""
    name = str(key)
    osetattr(self, name, value)
8021
def __setattr__(self, key, value):
    """
    Forbids rebinding a non-underscore name that is already a defined
    table on this DAL; everything else is set normally.
    """
    is_public = not key.startswith('_')
    if is_public and key in self:
        raise SyntaxError(
            'Object %s exists and cannot be redefined' % key)
    osetattr(self, key, value)
# del db['name'] removes the attribute directly (bypasses __setattr__ checks)
__delitem__ = object.__delattr__
def __repr__(self):
    """Unambiguous representation; never leaks the uri password."""
    if hasattr(self, '_uri'):
        return '<DAL uri="%s">' % hide_password(str(self._uri))
    return '<DAL db_uid="%s">' % self._db_uid
8035
def smart_query(self, fields, text):
    """
    Build a Set from a free-text query over `fields`, delegating the
    parsing to the module-level smart_query() helper.
    """
    parsed = smart_query(fields, text)
    return Set(self, parsed)
8038
def __call__(self, query=None, ignore_common_filters=None):
    """
    db(query) -> Set. Accepts a Query, a Table (all records), a Field
    (not-NULL test) or a dict of options.
    """
    if isinstance(query, Table):
        # whole-table shortcut: match every record by id
        query = self._adapter.id_query(query)
    elif isinstance(query, Field):
        # bare-field shortcut: records where the field is not NULL
        query = query != None
    elif isinstance(query, dict):
        icf = query.get("ignore_common_filters")
        if icf:
            ignore_common_filters = icf
    return Set(self, query, ignore_common_filters=ignore_common_filters)
8048
def commit(self):
    """Commit the current transaction on the underlying adapter."""
    self._adapter.commit()
8051
def rollback(self):
    """Roll back the current transaction on the underlying adapter."""
    self._adapter.rollback()
8054
def close(self):
    """
    Close the adapter connection and deregister this instance from the
    per-thread registry kept in THREAD_LOCAL.db_instances.
    """
    self._adapter.close()
    registry = THREAD_LOCAL.db_instances
    if self._db_uid in registry:
        group = registry[self._db_uid]
        group.remove(self)
        if not group:
            # last instance for this uid: drop the registry entry
            del registry[self._db_uid]
8062
def executesql(self, query, placeholders=None, as_dict=False,
               fields=None, colnames=None):
    """
    Executes a raw SQL statement and returns the fetched results.

    placeholders is optional and will always be None.
    If using raw SQL with placeholders, placeholders may be
    a sequence of values to be substituted in
    or, (if supported by the DB driver), a dictionary with keys
    matching named placeholders in your SQL.

    Added 2009-12-05 "as_dict" optional argument. Will always be
    None when using DAL. If using raw SQL can be set to True
    and the results cursor returned by the DB driver will be
    converted to a sequence of dictionaries keyed with the db
    field names. Tested with SQLite but should work with any database
    since the cursor.description used to get field names is part of the
    Python dbi 2.0 specs. Results returned with as_dict=True are
    the same as those returned when applying .to_list() to a DAL query.

    [{field1: value1, field2: value2}, {field1: value1b, field2: value2b}]

    Added 2012-08-24 "fields" and "colnames" optional arguments. If either
    is provided, the results cursor returned by the DB driver will be
    converted to a DAL Rows object using the db._adapter.parse() method.

    The "fields" argument is a list of DAL Field objects that match the
    fields returned from the DB. The Field objects should be part of one or
    more Table objects defined on the DAL object. The "fields" list can
    include one or more DAL Table objects in addition to or instead of
    including Field objects, or it can be just a single table (not in a
    list). In that case, the Field objects will be extracted from the
    table(s).

    Instead of specifying the "fields" argument, the "colnames" argument
    can be specified as a list of field names in tablename.fieldname format.
    Again, these should represent tables and fields defined on the DAL
    object.

    It is also possible to specify both "fields" and the associated
    "colnames". In that case, "fields" can also include DAL Expression
    objects in addition to Field objects. For Field objects in "fields",
    the associated "colnames" must still be in tablename.fieldname format.
    For Expression objects in "fields", the associated "colnames" can
    be any arbitrary labels.

    Note, the DAL Table objects referred to by "fields" or "colnames" can
    be dummy tables and do not have to represent any real tables in the
    database. Also, note that the "fields" and "colnames" must be in the
    same order as the fields in the results cursor returned from the DB.
    """
    adapter = self._adapter
    if placeholders:
        adapter.execute(query, placeholders)
    else:
        adapter.execute(query)
    if as_dict:
        if not hasattr(adapter.cursor,'description'):
            raise RuntimeError("database does not support executesql(...,as_dict=True)")
        # Non-DAL legacy db query, converts cursor results to dict.
        # sequence of 7-item sequences. each sequence tells about a column.
        # first item is always the field name according to Python Database API specs
        columns = adapter.cursor.description
        # reduce the column info down to just the field names
        fields = [f[0] for f in columns]
        # will hold our finished resultset in a list
        data = adapter._fetchall()
        # convert the list for each row into a dictionary so it's
        # easier to work with. row['field_name'] rather than row[0]
        return [dict(zip(fields,row)) for row in data]
    try:
        data = adapter._fetchall()
    except:
        # statements with no result set (e.g. DDL/UPDATE) return None
        return None
    if fields or colnames:
        fields = [] if fields is None else fields
        if not isinstance(fields, list):
            fields = [fields]
        # expand any Table objects into their component Fields
        extracted_fields = []
        for field in fields:
            if isinstance(field, Table):
                extracted_fields.extend([f for f in field])
            else:
                extracted_fields.append(field)
        if not colnames:
            colnames = ['%s.%s' % (f.tablename, f.name)
                        for f in extracted_fields]
        data = adapter.parse(
            data, fields=extracted_fields, colnames=colnames)
    return data
8151
def _remove_references_to(self, thistable):
    """
    Strip from every table's _referenced_by list the back-reference
    fields that point at `thistable` (used when a table is dropped).
    """
    for table in self:
        kept = [f for f in table._referenced_by
                if not f.table == thistable]
        table._referenced_by = kept
8156
def export_to_csv_file(self, ofile, *args, **kwargs):
    """
    Dumps every table of this DAL to `ofile` as CSV, each table preceded
    by a 'TABLE <name>' header and the dump terminated by 'END'.

    Recognized kwargs:
    :max_fetch_rows: batch size for selects (default 500)
    :write_colnames: write a header row per table (default True)
    remaining args/kwargs are forwarded to Rows.export_to_csv_file.
    """
    # the original read kwargs.get('max_fetch_rows,', 500) -- note the
    # stray comma inside the key string -- so the documented option was
    # silently ignored; pop so it is not forwarded to Rows.export
    step = long(kwargs.pop('max_fetch_rows', 500))
    write_colnames = kwargs['write_colnames'] = \
        kwargs.get("write_colnames", True)
    for table in self.tables:
        ofile.write('TABLE %s\r\n' % table)
        query = self._adapter.id_query(self[table])
        nrows = self(query).count()
        # column names only on the first batch of each table
        kwargs['write_colnames'] = write_colnames
        for k in range(0, nrows, step):
            self(query).select(limitby=(k, k+step)).export_to_csv_file(
                ofile, *args, **kwargs)
            kwargs['write_colnames'] = False
        ofile.write('\r\n\r\n')
    ofile.write('END')
8172
def import_from_csv_file(self, ifile, id_map=None, null='<NULL>',
                         unique='uuid', map_tablenames=None,
                         ignore_missing_tables=False,
                         *args, **kwargs):
    """
    Reads a multi-table CSV dump (as produced by export_to_csv_file:
    'TABLE <name>' headers, blank-line separated sections, terminated
    by 'END') and imports each section into the corresponding table.

    :id_map: optional dict used to remap record ids across tables
    :null: string representing NULL values in the file
    :unique: field used to deduplicate records on import
    :map_tablenames: optional {file_name: actual_name} renaming;
        map to None to skip a table
    :ignore_missing_tables: skip sections whose table is not defined
        instead of raising
    """
    #if id_map is None: id_map={}
    id_offset = {} # only used if id_map is None
    map_tablenames = map_tablenames or {}
    for line in ifile:
        line = line.strip()
        if not line:
            continue
        elif line == 'END':
            return
        elif not line.startswith('TABLE ') or \
                not line[6:] in self.tables:
            # NOTE(review): this check requires the *unmapped* name to be
            # a defined table, which appears to defeat map_tablenames /
            # ignore_missing_tables for unknown source names -- confirm
            raise SyntaxError('invalid file format')
        else:
            tablename = line[6:]
            tablename = map_tablenames.get(tablename,tablename)
            if tablename is not None and tablename in self.tables:
                self[tablename].import_from_csv_file(
                    ifile, id_map, null, unique, id_offset,
                    *args, **kwargs)
            elif tablename is None or ignore_missing_tables:
                # skip all non-empty lines
                for line in ifile:
                    if not line.strip():
                        break
            else:
                raise RuntimeError("Unable to import table that does not exist.\nTry db.import_from_csv_file(..., map_tablenames={'table':'othertable'},ignore_missing_tables=True)")
8203
def DAL_unpickler(db_uid):
    """Rebuild a pickled DAL as a '<zombie>' instance bound to db_uid."""
    return DAL('<zombie>', db_uid=db_uid)
8207
def DAL_pickler(db):
    """copyreg reducer: a DAL pickles down to just its db_uid."""
    return DAL_unpickler, (db._db_uid,)
# register DAL with copyreg so instances pickle down to their db_uid
copyreg.pickle(DAL, DAL_pickler, DAL_unpickler)
class SQLALL(object):
    """
    Helper that renders as a comma-separated string of all the field
    names of a table (each prefixed by the table name and '.').

    normally only called from within gluon.sql
    """

    def __init__(self, table):
        self._table = table

    def __str__(self):
        names = [str(field) for field in self._table]
        return ', '.join(names)
8226
# class Reference(int):
class Reference(long):
    """
    An integer subclass standing for a foreign-key value: used as the id
    of a referenced record, it lazily fetches and caches that record
    (in self._record) on first field access.
    """

    def __allocate(self):
        # fetch and cache the referenced record on first access
        if not self._record:
            self._record = self._table[long(self)]
        if not self._record:
            raise RuntimeError(
                "Using a recursive select but encountered a broken reference: %s %d"%(self._table, long(self)))

    def __getattr__(self, key, default=None):
        # `default` added (backward-compatible) so get() below works;
        # the original signature made every get() call raise TypeError
        if key == 'id':
            return long(self)
        if key in self._table:
            self.__allocate()
        if self._record:
            # to deal with case self.update_record()
            return self._record.get(key, default)
        else:
            return default

    def get(self, key, default=None):
        """dict-like access to the referenced record's fields."""
        return self.__getattr__(key, default)

    def __setattr__(self, key, value):
        if key.startswith('_'):
            # private attributes bypass the record machinery
            long.__setattr__(self, key, value)
            return
        self.__allocate()
        self._record[key] = value

    def __getitem__(self, key):
        if key == 'id':
            return long(self)
        self.__allocate()
        return self._record.get(key, None)

    def __setitem__(self, key, value):
        self.__allocate()
        self._record[key] = value
def Reference_unpickler(data):
    """Inverse of Reference_pickler: rebuild the integer via marshal."""
    return marshal.loads(data)
8270
def Reference_pickler(data):
    """copyreg reducer: serialize a Reference as a marshalled integer."""
    try:
        payload = marshal.dumps(long(data))
    except AttributeError:
        # fallback: hand-built marshal 'i' record for a 32-bit int
        payload = 'i%s' % struct.pack('<i', long(data))
    return (Reference_unpickler, (payload,))
# register Reference with copyreg so instances pickle as plain integers
copyreg.pickle(Reference, Reference_pickler, Reference_unpickler)
class MethodAdder(object):
    """
    Decorator factory that binds a function to a Table instance as a
    method, either under the function's own name
    (``@table.add_method()``) or an explicit one
    (``@table.add_method.some_name``).
    """
    def __init__(self, table):
        self.table = table

    def __call__(self):
        # bare call: register under the function's own name
        return self.register()

    def __getattr__(self, method_name):
        # attribute access supplies an explicit method name
        return self.register(method_name)

    def register(self, method_name=None):
        def _decorated(f):
            import types
            target = self.table
            bound = types.MethodType(f, target, target.__class__)
            setattr(target, method_name or f.func_name, bound)
            return f
        return _decorated
8296
8297 -class Table(object):
8298 8299 """ 8300 an instance of this class represents a database table 8301 8302 Example:: 8303 8304 db = DAL(...) 8305 db.define_table('users', Field('name')) 8306 db.users.insert(name='me') # print db.users._insert(...) to see SQL 8307 db.users.drop() 8308 """ 8309
    def __init__(
        self,
        db,
        tablename,
        *fields,
        **args
        ):
        """
        Initializes the table and performs checking on the provided fields.

        Each table will have automatically an 'id'.

        If a field is of type Table, the fields (excluding 'id') from that table
        will be used instead.

        :param db: the owning DAL (may be None for standalone tables)
        :param tablename: name of the table
        :param fields: Field instances, field dicts, or other Tables to copy
        :param args: keyword options (actual_name, sequence_name,
            trigger_name, common_filter, format, singular, plural,
            primarykey)

        :raises SyntaxError: when a supplied field is of incorrect type.
        """
        self._actual = False  # set to True by define_table()
        self._tablename = tablename
        self._ot = args.get('actual_name')
        self._sequence_name = args.get('sequence_name') or \
            db and db._adapter.sequence_name(tablename)
        self._trigger_name = args.get('trigger_name') or \
            db and db._adapter.trigger_name(tablename)
        self._common_filter = args.get('common_filter')
        self._format = args.get('format')
        self._singular = args.get(
            'singular',tablename.replace('_',' ').capitalize())
        self._plural = args.get(
            'plural',pluralize(self._singular.lower()).capitalize())
        # horrible but for backward compatibility of appadmin:
        if 'primarykey' in args and args['primarykey'] is not None:
            self._primarykey = args.get('primarykey')

        # insert/update/delete callback chains, run by insert()/Set ops
        self._before_insert = []
        self._before_update = [Set.delete_uploaded_files]
        self._before_delete = [Set.delete_uploaded_files]
        self._after_insert = []
        self._after_update = []
        self._after_delete = []

        self.add_method = MethodAdder(self)

        fieldnames,newfields=set(),[]
        _primarykey = getattr(self, '_primarykey', None)
        if _primarykey is not None:
            # keyed table: no automatic 'id'; _id points at the single
            # primary-key field when there is exactly one
            if not isinstance(_primarykey, list):
                raise SyntaxError(
                    "primarykey must be a list of fields from table '%s'" \
                    % tablename)
            if len(_primarykey)==1:
                self._id = [f for f in fields if isinstance(f,Field) \
                            and f.name==_primarykey[0]][0]
        elif not [f for f in fields if (isinstance(f,Field) and
                  f.type=='id') or (isinstance(f, dict) and
                  f.get("type", None)=="id")]:
            # no explicit 'id' field supplied: create one automatically
            field = Field('id', 'id')
            newfields.append(field)
            fieldnames.add('id')
            self._id = field
        virtual_fields = []
        def include_new(field):
            # accept a concrete Field, tracking the table's 'id' field
            newfields.append(field)
            fieldnames.add(field.name)
            if field.type=='id':
                self._id = field
        for field in fields:
            if isinstance(field, (FieldMethod, FieldVirtual)):
                virtual_fields.append(field)
            elif isinstance(field, Field) and not field.name in fieldnames:
                if field.db is not None:
                    # field already belongs to another table: copy it
                    field = copy.copy(field)
                include_new(field)
            elif isinstance(field, dict) and not field['fieldname'] in fieldnames:
                include_new(Field(**field))
            elif isinstance(field, Table):
                # inherit fields (except 'id') from another table
                table = field
                for field in table:
                    if not field.name in fieldnames and not field.type=='id':
                        t2 = not table._actual and self._tablename
                        include_new(field.clone(point_self_references_to=t2))
            elif not isinstance(field, (Field, Table)):
                raise SyntaxError(
                    'define_table argument is not a Field or Table: %s' % field)
        fields = newfields
        self._db = db
        tablename = tablename
        self._fields = SQLCallableList()
        self.virtualfields = []
        fields = list(fields)

        if db and db._adapter.uploads_in_blob==True:
            # backends that store uploads in the db: add a companion blob
            # field for each upload field that requests it
            uploadfields = [f.name for f in fields if f.type=='blob']
            for field in fields:
                fn = field.uploadfield
                if isinstance(field, Field) and field.type == 'upload'\
                        and fn is True:
                    fn = field.uploadfield = '%s_blob' % field.name
                if isinstance(fn,str) and not fn in uploadfields:
                    fields.append(Field(fn,'blob',default='',
                                        writable=False,readable=False))

        lower_fieldnames = set()
        reserved = dir(Table) + ['fields']
        if (db and db.check_reserved):
            check_reserved = db.check_reserved_keyword
        else:
            def check_reserved(field_name):
                if field_name in reserved:
                    raise SyntaxError("field name %s not allowed" % field_name)
        for field in fields:
            field_name = field.name
            check_reserved(field_name)
            fn_lower = field_name.lower()
            # duplicate detection is case-insensitive (SQL identifiers)
            if fn_lower in lower_fieldnames:
                raise SyntaxError("duplicate field %s in table %s" \
                    % (field_name, tablename))
            else:
                lower_fieldnames.add(fn_lower)

            self.fields.append(field_name)
            self[field_name] = field
            if field.type == 'id':
                self['id'] = field
            # bind the field to this table/db
            field.tablename = field._tablename = tablename
            field.table = field._table = self
            field.db = field._db = db
        self.ALL = SQLALL(self)

        if _primarykey is not None:
            for k in _primarykey:
                if k not in self.fields:
                    # NOTE(review): message below is missing its closing quote
                    raise SyntaxError(
                        "primarykey must be a list of fields from table '%s " % tablename)
                else:
                    self[k].notnull = True
        for field in virtual_fields:
            self[field.name] = field

    @property
    def fields(self):
        """Field names (SQLCallableList) in definition order."""
        return self._fields
8452
    def update(self,*args,**kwargs):
        """Disabled on Table: use db(query).update(...) on a Set instead."""
        raise RuntimeError("Syntax Not Supported")
8455
    def _enable_record_versioning(self,
                                  archive_db=None,
                                  archive_name = '%(tablename)s_archive',
                                  is_active = 'is_active',
                                  current_record = 'current_record',
                                  current_record_label = None):
        """Define an archive table and hook callbacks so updates copy the
        old row into it and deletes become is_active=False soft-deletes.

        :param archive_db: DAL for the archive (defaults to this table's db)
        :param archive_name: template for the archive table's name
        :param is_active: name of the soft-delete flag field (falsy disables)
        :param current_record: archive field referencing the live record
        """
        db = self._db
        archive_db = archive_db or db
        archive_name = archive_name % dict(tablename=self._tablename)
        if archive_name in archive_db.tables():
            return # do not try define the archive if already exists
        fieldnames = self.fields()
        same_db = archive_db is db
        # cross-database archives cannot hold real references: use bigint
        field_type = self if same_db else 'bigint'
        clones = []
        for field in self:
            nfk = same_db or not field.type.startswith('reference')
            clones.append(field.clone(
                unique=False, type=field.type if nfk else 'bigint'))
        archive_db.define_table(
            archive_name,
            Field(current_record,field_type,label=current_record_label),
            *clones,**dict(format=self._format))

        # default args bind the current values into the callback closure
        self._before_update.append(
            lambda qset,fs,db=archive_db,an=archive_name,cn=current_record:
                archive_record(qset,fs,db[an],cn))
        if is_active and is_active in fieldnames:
            self._before_delete.append(
                lambda qset: qset.update(is_active=False))
            # common filter hides soft-deleted rows on every involved table
            newquery = lambda query, t=self, name=self._tablename: \
                reduce(AND,[db[tn].is_active == True
                            for tn in db._adapter.tables(query)
                            if tn==name or getattr(db[tn],'_ot',None)==name])
            query = self._common_filter
            if query:
                # NOTE(review): combines a Query with a callable via '&';
                # verify Query.__and__ accepts a callable common filter
                newquery = query & newquery
            self._common_filter = newquery
8494
8495 - def _validate(self,**vars):
8496 errors = Row() 8497 for key,value in vars.iteritems(): 8498 value,error = self[key].validate(value) 8499 if error: 8500 errors[key] = error 8501 return errors
8502
8503 - def _create_references(self):
8504 db = self._db 8505 pr = db._pending_references 8506 self._referenced_by = [] 8507 self._references = [] 8508 for field in self: 8509 fieldname = field.name 8510 field_type = field.type 8511 if isinstance(field_type,str) and field_type[:10] == 'reference ': 8512 ref = field_type[10:].strip() 8513 if not ref: 8514 SyntaxError('Table: reference to nothing: %s' %ref) 8515 if '.' in ref: 8516 rtablename, throw_it,rfieldname = ref.partition('.') 8517 else: 8518 rtablename, rfieldname = ref, None 8519 if not rtablename in db: 8520 pr[rtablename] = pr.get(rtablename,[]) + [field] 8521 continue 8522 rtable = db[rtablename] 8523 if rfieldname: 8524 if not hasattr(rtable,'_primarykey'): 8525 raise SyntaxError( 8526 'keyed tables can only reference other keyed tables (for now)') 8527 if rfieldname not in rtable.fields: 8528 raise SyntaxError( 8529 "invalid field '%s' for referenced table '%s' in table '%s'" \ 8530 % (rfieldname, rtablename, self._tablename)) 8531 rfield = rtable[rfieldname] 8532 else: 8533 rfield = rtable._id 8534 rtable._referenced_by.append(field) 8535 field.referent = rfield 8536 self._references.append(field) 8537 else: 8538 field.referent = None 8539 for referee in pr.get(self._tablename,[]): 8540 self._referenced_by.append(referee)
8541
8542 - def _filter_fields(self, record, id=False):
8543 return dict([(k, v) for (k, v) in record.iteritems() if k 8544 in self.fields and (self[k].type!='id' or id)])
8545
8546 - def _build_query(self,key):
8547 """ for keyed table only """ 8548 query = None 8549 for k,v in key.iteritems(): 8550 if k in self._primarykey: 8551 if query: 8552 query = query & (self[k] == v) 8553 else: 8554 query = (self[k] == v) 8555 else: 8556 raise SyntaxError( 8557 'Field %s is not part of the primary key of %s' % \ 8558 (k,self._tablename)) 8559 return query
8560
    def __getitem__(self, key):
        """table[key]: record by primary-key dict (keyed tables), record
        by integer id, or attribute/Field lookup by name."""
        if not key:
            return None
        elif isinstance(key, dict):
            """ for keyed table """
            query = self._build_query(key)
            return self._db(query).select(limitby=(0,1), orderby_on_limitby=False).first()
        elif str(key).isdigit() or 'google' in DRIVERS and isinstance(key, Key):
            # parses as: isdigit() or ('google' in DRIVERS and isinstance(key, Key))
            return self._db(self._id == key).select(limitby=(0,1), orderby_on_limitby=False).first()
        elif key:
            return ogetattr(self, str(key))
8572
    def __call__(self, key=DEFAULT, **kwargs):
        """table(id) / table(query) / table(field=value, ...): fetch a
        single matching record, or None.

        Special kwargs: _for_update (row lock), _orderby.
        """
        for_update = kwargs.get('_for_update',False)
        if '_for_update' in kwargs: del kwargs['_for_update']

        orderby = kwargs.get('_orderby',None)
        if '_orderby' in kwargs: del kwargs['_orderby']

        if not key is DEFAULT:
            if isinstance(key, Query):
                record = self._db(key).select(
                    limitby=(0,1),for_update=for_update, orderby=orderby, orderby_on_limitby=False).first()
            elif not str(key).isdigit():
                # non-numeric key can never match an id
                record = None
            else:
                record = self._db(self._id == key).select(
                    limitby=(0,1),for_update=for_update, orderby=orderby, orderby_on_limitby=False).first()
            if record:
                # remaining kwargs act as equality filters on the record
                for k,v in kwargs.iteritems():
                    if record[k]!=v: return None
            return record
        elif kwargs:
            # no key: AND all field=value pairs into a query
            query = reduce(lambda a,b:a&b,[self[k]==v for k,v in kwargs.iteritems()])
            return self._db(query).select(limitby=(0,1),for_update=for_update, orderby=orderby, orderby_on_limitby=False).first()
        else:
            return None
8598
    def __setitem__(self, key, value):
        """table[key] = value: keyed-table upsert (dict key/value), update
        by integer id (0 means insert), or plain attribute assignment."""
        if isinstance(key, dict) and isinstance(value, dict):
            """ option for keyed table """
            if set(key.keys()) == set(self._primarykey):
                value = self._filter_fields(value)
                kv = {}
                kv.update(value)
                kv.update(key)
                # try insert first; if the key already exists, update
                if not self.insert(**kv):
                    query = self._build_query(key)
                    self._db(query).update(**self._filter_fields(value))
            else:
                raise SyntaxError(
                    'key must have all fields from primary key: %s'%\
                    (self._primarykey))
        elif str(key).isdigit():
            if key == 0:
                # id 0 is the insert sentinel
                self.insert(**self._filter_fields(value))
            elif self._db(self._id == key)\
                    .update(**self._filter_fields(value)) is None:
                raise SyntaxError('No such record: %s' % key)
        else:
            if isinstance(key, dict):
                raise SyntaxError(
                    'value must be a dictionary: %s' % value)
            osetattr(self, str(key), value)
    # attribute access shares the record/field lookup logic above
    __getattr__ = __getitem__
8628 - def __setattr__(self, key, value):
8629 if key[:1]!='_' and key in self: 8630 raise SyntaxError('Object exists and cannot be redefined: %s' % key) 8631 osetattr(self,key,value)
8632
    def __delitem__(self, key):
        """del table[key]: delete by primary-key dict or integer id;
        raises SyntaxError when nothing was deleted."""
        if isinstance(key, dict):
            query = self._build_query(key)
            if not self._db(query).delete():
                raise SyntaxError('No such record: %s' % key)
        elif not str(key).isdigit() or \
                not self._db(self._id == key).delete():
            raise SyntaxError('No such record: %s' % key)
8641
    def __contains__(self,key):
        """'name' in table -- True when the table has that attribute/field."""
        return hasattr(self,key)

    # dict-compatibility alias
    has_key = __contains__
    def items(self):
        """All instance attributes as (name, value) pairs (dict-style)."""
        return self.__dict__.items()
8649
8650 - def __iter__(self):
8651 for fieldname in self.fields: 8652 yield self[fieldname]
8653
    def iteritems(self):
        """Iterator over all instance attributes (dict-style)."""
        return self.__dict__.iteritems()
8656 8657
    def __repr__(self):
        """Debug representation: table name plus field names."""
        return '<Table %s (%s)>' % (self._tablename,','.join(self.fields()))
8660
    def __str__(self):
        """SQL name of the table; aliased tables render as '"real" AS alias'
        (Oracle omits the AS keyword)."""
        if self._ot is not None:
            ot = self._db._adapter.QUOTE_TEMPLATE % self._ot
            if 'Oracle' in str(type(self._db._adapter)):
                return '%s %s' % (ot, self._tablename)
            return '%s AS %s' % (ot, self._tablename)
        return self._tablename
8668
    def _drop(self, mode = ''):
        """Return the DROP TABLE SQL without executing it."""
        return self._db._adapter._drop(self, mode)
8671
    def drop(self, mode = ''):
        """Drop the table in the backend (mode is adapter-specific)."""
        return self._db._adapter.drop(self,mode)
8674
    def _listify(self,fields,update=False):
        """Turn a {name: value} dict into [(Field, value), ...] suitable
        for the adapter, applying filter_in, default/update values and
        computed fields.

        :raises SyntaxError: unknown field name, or uncomputable required
            computed field
        :raises RuntimeError: missing required field on insert
        """
        new_fields = {} # format: new_fields[name] = (field,value)

        # store all fields passed as input in new_fields
        for name in fields:
            if not name in self.fields:
                if name != 'id':
                    raise SyntaxError(
                        'Field %s does not belong to the table' % name)
            else:
                field = self[name]
                value = fields[name]
                if field.filter_in:
                    value = field.filter_in(value)
                new_fields[name] = (field,value)

        # check all fields that should be in the table but are not passed
        to_compute = []
        for ofield in self:
            name = ofield.name
            if not name in new_fields:
                # if field is supposed to be computed, compute it!
                if ofield.compute: # save those to compute for later
                    to_compute.append((name,ofield))
                # if field is required, check its default value
                elif not update and not ofield.default is None:
                    value = ofield.default
                    fields[name] = value
                    new_fields[name] = (ofield,value)
                # if this is an update, use the update value instead
                elif update and not ofield.update is None:
                    value = ofield.update
                    fields[name] = value
                    new_fields[name] = (ofield,value)
                # if the field is still not there but it should, error
                elif not update and ofield.required:
                    raise RuntimeError(
                        'Table: missing required field: %s' % name)
        # now deal with fields that are supposed to be computed
        if to_compute:
            row = Row(fields)
            for name,ofield in to_compute:
                # try compute it
                try:
                    row[name] = new_value = ofield.compute(row)
                    new_fields[name] = (ofield, new_value)
                except (KeyError, AttributeError):
                    # error silently unless field is required!
                    if ofield.required:
                        raise SyntaxError('unable to compute field: %s' % name)
        return new_fields.values()
8726
    def _attempt_upload(self, fields):
        """Store pending file-like values of 'upload' fields via
        Field.store() and replace them in fields with the stored name."""
        for field in self:
            if field.type=='upload' and field.name in fields:
                value = fields[field.name]
                # strings are already stored names; only handle file objects
                if value is not None and not isinstance(value,str):
                    if hasattr(value,'file') and hasattr(value,'filename'):
                        # cgi.FieldStorage-like object
                        new_name = field.store(value.file,filename=value.filename)
                    elif hasattr(value,'read') and hasattr(value,'name'):
                        # plain open file object
                        new_name = field.store(value,filename=value.name)
                    else:
                        raise RuntimeError("Unable to handle upload")
                    fields[field.name] = new_name
8739
8740 - def _defaults(self, fields):
8741 "If there are no fields/values specified, return table defaults" 8742 if not fields: 8743 fields = {} 8744 for field in self: 8745 if field.type != "id": 8746 fields[field.name] = field.default 8747 return fields
8748
    def _insert(self, **fields):
        """Return the INSERT SQL for these fields without executing it."""
        fields = self._defaults(fields)
        return self._db._adapter._insert(self, self._listify(fields))
8752
    def insert(self, **fields):
        """Insert a record; applies defaults, stores uploads, and runs the
        callback chains.  A truthy return from any _before_insert callback
        aborts the insert and returns 0; otherwise returns the adapter's
        insert result (typically the new id)."""
        fields = self._defaults(fields)
        self._attempt_upload(fields)
        if any(f(fields) for f in self._before_insert): return 0
        ret = self._db._adapter.insert(self, self._listify(fields))
        if ret and self._after_insert:
            fields = Row(fields)
            [f(fields,ret) for f in self._after_insert]
        return ret
8762
8763 - def validate_and_insert(self,**fields):
8764 response = Row() 8765 response.errors = Row() 8766 new_fields = copy.copy(fields) 8767 for key,value in fields.iteritems(): 8768 value,error = self[key].validate(value) 8769 if error: 8770 response.errors[key] = "%s" % error 8771 else: 8772 new_fields[key] = value 8773 if not response.errors: 8774 response.id = self.insert(**new_fields) 8775 else: 8776 response.id = None 8777 return response
8778
8779 - def validate_and_update(self, _key=DEFAULT, **fields):
8780 response = Row() 8781 response.errors = Row() 8782 new_fields = copy.copy(fields) 8783 8784 for key,value in fields.iteritems(): 8785 value,error = self[key].validate(value) 8786 if error: 8787 response.errors[key] = "%s" % error 8788 else: 8789 new_fields[key] = value 8790 8791 if _key is DEFAULT: 8792 record = self(**values) 8793 elif isinstance(_key,dict): 8794 record = self(**_key) 8795 else: 8796 record = self(_key) 8797 8798 if not response.errors and record: 8799 row = self._db(self._id==_key) 8800 response.id = row.update(**fields) 8801 else: 8802 response.id = None 8803 return response
8804
8805 - def update_or_insert(self, _key=DEFAULT, **values):
8806 if _key is DEFAULT: 8807 record = self(**values) 8808 elif isinstance(_key,dict): 8809 record = self(**_key) 8810 else: 8811 record = self(_key) 8812 if record: 8813 record.update_record(**values) 8814 newid = None 8815 else: 8816 newid = self.insert(**values) 8817 return newid
8818
    def bulk_insert(self, items):
        """
        here items is a list of dictionaries

        Runs _before_insert on every item first (any truthy return aborts
        the whole batch, returning 0), then inserts in one adapter call
        and fires _after_insert per item.
        """
        items = [self._listify(item) for item in items]
        if any(f(item) for item in items for f in self._before_insert):return 0
        ret = self._db._adapter.bulk_insert(self,items)
        # pair each item with its returned id for the after-insert hooks
        ret and [[f(item,ret[k]) for k,item in enumerate(items)] for f in self._after_insert]
        return ret
8828
    def _truncate(self, mode = None):
        """Return the TRUNCATE SQL without executing it."""
        return self._db._adapter._truncate(self, mode)
8831
    def truncate(self, mode = None):
        """Remove all records from the table (mode is adapter-specific)."""
        return self._db._adapter.truncate(self, mode)
8834
    def import_from_csv_file(
        self,
        csvfile,
        id_map=None,
        null='<NULL>',
        unique='uuid',
        id_offset=None, # id_offset used only when id_map is None
        *args, **kwargs
        ):
        """
        Import records from csv file.
        Column headers must have same names as table fields.
        Field 'id' is ignored.
        If column names read 'table.file' the 'table.' prefix is ignored.
        'unique' argument is a field which must be unique
        (typically a uuid field)
        'restore' argument is default False;
        if set True will remove old values in table first.
        'id_map' if set to None will not map ids.
        The import will keep the id numbers in the restored table.
        This assumes that there is an field of type id that
        is integer and in incrementing order.
        Will keep the id numbers in restored table.
        """

        delimiter = kwargs.get('delimiter', ',')
        quotechar = kwargs.get('quotechar', '"')
        quoting = kwargs.get('quoting', csv.QUOTE_MINIMAL)
        restore = kwargs.get('restore', False)
        if restore:
            self._db[self].truncate()

        reader = csv.reader(csvfile, delimiter=delimiter,
                            quotechar=quotechar, quoting=quoting)
        colnames = None
        if isinstance(id_map, dict):
            if not self._tablename in id_map:
                id_map[self._tablename] = {}
            id_map_self = id_map[self._tablename]

        def fix(field, value, id_map, id_offset):
            # convert one csv cell to the python value for its field type
            list_reference_s='list:reference'
            if value == null:
                value = None
            elif field.type=='blob':
                value = base64.b64decode(value)
            elif field.type=='double' or field.type=='float':
                if not value.strip():
                    value = None
                else:
                    value = float(value)
            elif field.type in ('integer','bigint'):
                if not value.strip():
                    value = None
                else:
                    value = long(value)
            elif field.type.startswith('list:string'):
                value = bar_decode_string(value)
            elif field.type.startswith(list_reference_s):
                ref_table = field.type[len(list_reference_s):].strip()
                if id_map is not None:
                    # remap each referenced id through the id_map
                    value = [id_map[ref_table][long(v)] \
                             for v in bar_decode_string(value)]
                else:
                    value = [v for v in bar_decode_string(value)]
            elif field.type.startswith('list:'):
                value = bar_decode_integer(value)
            elif id_map and field.type.startswith('reference'):
                try:
                    value = id_map[field.type[9:].strip()][long(value)]
                except KeyError:
                    pass
            elif id_offset and field.type.startswith('reference'):
                try:
                    value = id_offset[field.type[9:].strip()]+long(value)
                except KeyError:
                    pass
            return (field.name, value)

        def is_id(colname):
            if colname in self:
                return self[colname].type == 'id'
            else:
                return False

        first = True
        unique_idx = None
        for lineno, line in enumerate(reader):
            if not line:
                break
            if not colnames:
                # assume this is the first line of the input, contains colnames
                colnames = [x.split('.',1)[-1] for x in line][:len(line)]
                cols, cid = [], None
                for i,colname in enumerate(colnames):
                    if is_id(colname):
                        cid = i
                    elif colname in self.fields:
                        cols.append((i,self[colname]))
                    if colname == unique:
                        unique_idx = i
            else:
                # every other line contains instead data
                items = []
                for i, field in cols:
                    try:
                        items.append(fix(field, line[i], id_map, id_offset))
                    except ValueError:
                        raise RuntimeError("Unable to parse line:%s field:%s value:'%s'"
                                           % (lineno+1,field,line[i]))

                # id-preserving restore path: no id_map, but an id column,
                # an id_offset dict, and no unique column in play
                if not (id_map or cid is None or id_offset is None or unique_idx):
                    csv_id = long(line[cid])
                    curr_id = self.insert(**dict(items))
                    if first:
                        first = False
                        # First curr_id is bigger than csv_id,
                        # then we are not restoring but
                        # extending db table with csv db table
                        id_offset[self._tablename] = (curr_id-csv_id) \
                            if curr_id>csv_id else 0
                    # create new id until we get the same as old_id+offset
                    while curr_id<csv_id+id_offset[self._tablename]:
                        self._db(self._db[self][colnames[cid]] == curr_id).delete()
                        curr_id = self.insert(**dict(items))
                # Validation. Check for duplicate of 'unique' &,
                # if present, update instead of insert.
                elif not unique_idx:
                    new_id = self.insert(**dict(items))
                else:
                    unique_value = line[unique_idx]
                    query = self._db[self][unique] == unique_value
                    record = self._db(query).select().first()
                    if record:
                        record.update_record(**dict(items))
                        new_id = record[self._id.name]
                    else:
                        new_id = self.insert(**dict(items))
                if id_map and cid is not None:
                    id_map_self[long(line[cid])] = new_id
8976 - def as_dict(self, flat=False, sanitize=True):
8977 table_as_dict = dict(tablename=str(self), fields=[], 8978 sequence_name=self._sequence_name, 8979 trigger_name=self._trigger_name, 8980 common_filter=self._common_filter, format=self._format, 8981 singular=self._singular, plural=self._plural) 8982 8983 for field in self: 8984 if (field.readable or field.writable) or (not sanitize): 8985 table_as_dict["fields"].append(field.as_dict( 8986 flat=flat, sanitize=sanitize)) 8987 return table_as_dict
8988
    def as_xml(self, sanitize=True):
        """Table definition as XML (requires gluon serializers)."""
        if not have_serializers:
            raise ImportError("No xml serializers available")
        d = self.as_dict(flat=True, sanitize=sanitize)
        return serializers.xml(d)
8994
    def as_json(self, sanitize=True):
        """Table definition as JSON (requires gluon serializers)."""
        if not have_serializers:
            raise ImportError("No json serializers available")
        d = self.as_dict(flat=True, sanitize=sanitize)
        return serializers.json(d)
9000
    def as_yaml(self, sanitize=True):
        """Table definition as YAML (requires gluon serializers)."""
        if not have_serializers:
            raise ImportError("No YAML serializers available")
        d = self.as_dict(flat=True, sanitize=sanitize)
        return serializers.yaml(d)
9006
    def with_alias(self, alias):
        """Return an aliased copy of this table for self-joins."""
        return self._db._adapter.alias(self,alias)
9009
    def on(self, query):
        """table.on(query): join condition expression for select(join=...)."""
        return Expression(self._db,self._db._adapter.ON,self,query)
9012
def archive_record(qset,fs,archive_table,current_record):
    """_before_update callback installed by _enable_record_versioning:
    copy every row about to be updated into archive_table, storing the
    live record's id in the current_record field.

    Returns False so the update itself is never aborted.
    :raises RuntimeError: when qset spans more than one table.

    Fix: removed the unused local 'table' (dead lookup of qset's table).
    """
    tablenames = qset.db._adapter.tables(qset.query)
    if len(tablenames)!=1: raise RuntimeError("cannot update join")
    for row in qset.select():
        fields = archive_table._filter_fields(row)
        fields[current_record] = row.id
        archive_table.insert(**fields)
    return False
9022
class Expression(object):
    """A node of the DAL expression tree (Field and Query build on it).

    Holds an adapter operation (self.op) with up to two operands; the
    adapter's expand() renders the tree as backend SQL (see __str__).
    """

    def __init__(
        self,
        db,
        op,
        first=None,
        second=None,
        type=None,
        **optional_args
        ):

        self.db = db
        self.op = op
        self.first = first
        self.second = second
        self._table = getattr(first,'_table',None)
        ### self._tablename = first._tablename ## CHECK
        # the expression's SQL type defaults to the first operand's type
        if not type and first and hasattr(first,'type'):
            self.type = first.type
        else:
            self.type = type
        self.optional_args = optional_args

    # --- aggregate / scalar functions -------------------------------

    def sum(self):
        db = self.db
        return Expression(db, db._adapter.AGGREGATE, self, 'SUM', self.type)

    def max(self):
        db = self.db
        return Expression(db, db._adapter.AGGREGATE, self, 'MAX', self.type)

    def min(self):
        db = self.db
        return Expression(db, db._adapter.AGGREGATE, self, 'MIN', self.type)

    def len(self):
        """SQL LENGTH() of this expression (integer-typed)."""
        db = self.db
        return Expression(db, db._adapter.LENGTH, self, None, 'integer')

    def avg(self):
        db = self.db
        return Expression(db, db._adapter.AGGREGATE, self, 'AVG', self.type)

    def abs(self):
        db = self.db
        return Expression(db, db._adapter.AGGREGATE, self, 'ABS', self.type)

    # --- string functions -------------------------------------------

    def lower(self):
        db = self.db
        return Expression(db, db._adapter.LOWER, self, None, self.type)

    def upper(self):
        db = self.db
        return Expression(db, db._adapter.UPPER, self, None, self.type)

    def replace(self,a,b):
        """SQL REPLACE(self, a, b)."""
        db = self.db
        return Expression(db, db._adapter.REPLACE, self, (a,b), self.type)

    # --- date/time part extraction ----------------------------------

    def year(self):
        db = self.db
        return Expression(db, db._adapter.EXTRACT, self, 'year', 'integer')

    def month(self):
        db = self.db
        return Expression(db, db._adapter.EXTRACT, self, 'month', 'integer')

    def day(self):
        db = self.db
        return Expression(db, db._adapter.EXTRACT, self, 'day', 'integer')

    def hour(self):
        db = self.db
        return Expression(db, db._adapter.EXTRACT, self, 'hour', 'integer')

    def minutes(self):
        db = self.db
        return Expression(db, db._adapter.EXTRACT, self, 'minute', 'integer')

    def coalesce(self,*others):
        """SQL COALESCE(self, *others)."""
        db = self.db
        return Expression(db, db._adapter.COALESCE, self, others, self.type)

    def coalesce_zero(self):
        """SQL COALESCE(self, 0)."""
        db = self.db
        return Expression(db, db._adapter.COALESCE_ZERO, self, None, self.type)

    def seconds(self):
        db = self.db
        return Expression(db, db._adapter.EXTRACT, self, 'second', 'integer')

    def epoch(self):
        """Seconds since the Unix epoch (integer-typed)."""
        db = self.db
        return Expression(db, db._adapter.EPOCH, self, None, 'integer')

    def __getslice__(self, start, stop):
        """expr[start:stop] -> SQL SUBSTRING (Python 2 slice protocol);
        negative indices count from the end via LENGTH()."""
        db = self.db
        if start < 0:
            pos0 = '(%s - %d)' % (self.len(), abs(start) - 1)
        else:
            pos0 = start + 1

        if stop < 0:
            length = '(%s - %d - %s)' % (self.len(), abs(stop) - 1, pos0)
        elif stop == sys.maxint:
            # open-ended slice: take everything from pos0
            length = self.len()
        else:
            length = '(%s - %s)' % (stop + 1, pos0)
        return Expression(db,db._adapter.SUBSTRING,
                          self, (pos0, length), self.type)

    def __getitem__(self, i):
        """Single subscript rendered as a length-1 slice."""
        return self[i:i + 1]

    def __str__(self):
        """Expand to the backend SQL string via the adapter."""
        return self.db._adapter.expand(self,self.type)

    def __or__(self, other): # for use in sortby
        db = self.db
        return Expression(db,db._adapter.COMMA,self,other,self.type)

    def __invert__(self):
        """~expr: DESC ordering in orderby.

        NOTE(review): hasattr(self,'_op') looks like it was meant to be
        'op' (the attribute set in __init__), so the double-invert
        shortcut appears never to fire -- confirm before changing.
        """
        db = self.db
        if hasattr(self,'_op') and self.op == db._adapter.INVERT:
            return self.first
        return Expression(db,db._adapter.INVERT,self,type=self.type)

    # --- arithmetic --------------------------------------------------

    def __add__(self, other):
        db = self.db
        return Expression(db,db._adapter.ADD,self,other,self.type)

    def __sub__(self, other):
        db = self.db
        # pick a result type able to represent the difference
        if self.type in ('integer','bigint'):
            result_type = 'integer'
        elif self.type in ['date','time','datetime','double','float']:
            result_type = 'double'
        elif self.type.startswith('decimal('):
            result_type = self.type
        else:
            raise SyntaxError("subtraction operation not supported for type")
        return Expression(db,db._adapter.SUB,self,other,result_type)

    def __mul__(self, other):
        db = self.db
        return Expression(db,db._adapter.MUL,self,other,self.type)

    def __div__(self, other):
        db = self.db
        return Expression(db,db._adapter.DIV,self,other,self.type)

    def __mod__(self, other):
        db = self.db
        return Expression(db,db._adapter.MOD,self,other,self.type)

    # --- comparisons (these build Query objects) ---------------------

    def __eq__(self, value):
        db = self.db
        return Query(db, db._adapter.EQ, self, value)

    def __ne__(self, value):
        db = self.db
        return Query(db, db._adapter.NE, self, value)

    def __lt__(self, value):
        db = self.db
        return Query(db, db._adapter.LT, self, value)

    def __le__(self, value):
        db = self.db
        return Query(db, db._adapter.LE, self, value)

    def __gt__(self, value):
        db = self.db
        return Query(db, db._adapter.GT, self, value)

    def __ge__(self, value):
        db = self.db
        return Query(db, db._adapter.GE, self, value)

    def like(self, value, case_sensitive=False):
        """SQL LIKE (ILIKE when case_sensitive is False)."""
        db = self.db
        op = case_sensitive and db._adapter.LIKE or db._adapter.ILIKE
        return Query(db, op, self, value)

    def regexp(self, value):
        db = self.db
        return Query(db, db._adapter.REGEXP, self, value)

    def belongs(self, *value, **kwattr):
        """
        Accepts the following inputs:
           field.belongs(1,2)
           field.belongs((1,2))
           field.belongs(query)

        Does NOT accept:
           field.belongs(1)
        """
        db = self.db
        if len(value) == 1:
            value = value[0]
        if isinstance(value,Query):
            # IN (sub-select) on the queried table's id
            value = db(value)._select(value.first._table._id)
        elif not isinstance(value, basestring):
            value = set(value)
            if kwattr.get('null') and None in value:
                # belongs(..., null=True): also match NULL rows
                value.remove(None)
                return (self == None) | Query(db, db._adapter.BELONGS, self, value)
        return Query(db, db._adapter.BELONGS, self, value)

    def startswith(self, value):
        db = self.db
        if not self.type in ('string', 'text', 'json', 'upload'):
            raise SyntaxError("startswith used with incompatible field type")
        return Query(db, db._adapter.STARTSWITH, self, value)

    def endswith(self, value):
        db = self.db
        if not self.type in ('string', 'text', 'json', 'upload'):
            raise SyntaxError("endswith used with incompatible field type")
        return Query(db, db._adapter.ENDSWITH, self, value)

    def contains(self, value, all=False, case_sensitive=False):
        """
        The case_sensitive parameter is only useful for PostgreSQL.
        For other RDBMSs it is ignored and contains is always case in-sensitive.
        For MongoDB and GAE contains is always case sensitive.
        """
        db = self.db
        if isinstance(value,(list, tuple)):
            subqueries = [self.contains(str(v).strip(),case_sensitive=case_sensitive)
                          for v in value if str(v).strip()]
            if not subqueries:
                return self.contains('')
            else:
                # all=True ANDs the sub-queries, otherwise OR
                return reduce(all and AND or OR,subqueries)
        if not self.type in ('string', 'text', 'json', 'upload') and not self.type.startswith('list:'):
            raise SyntaxError("contains used with incompatible field type")
        return Query(db, db._adapter.CONTAINS, self, value, case_sensitive=case_sensitive)

    def with_alias(self, alias):
        """Render as 'expr AS alias' in the column list."""
        db = self.db
        return Expression(db, db._adapter.AS, self, alias, self.type)

    # GIS expressions

    def st_asgeojson(self, precision=15, options=0, version=1):
        return Expression(self.db, self.db._adapter.ST_ASGEOJSON, self,
                          dict(precision=precision, options=options,
                               version=version), 'string')

    def st_astext(self):
        db = self.db
        return Expression(db, db._adapter.ST_ASTEXT, self, type='string')

    def st_x(self):
        db = self.db
        return Expression(db, db._adapter.ST_X, self, type='string')

    def st_y(self):
        db = self.db
        return Expression(db, db._adapter.ST_Y, self, type='string')

    def st_distance(self, other):
        db = self.db
        return Expression(db,db._adapter.ST_DISTANCE,self,other, 'double')

    def st_simplify(self, value):
        db = self.db
        return Expression(db, db._adapter.ST_SIMPLIFY, self, value, self.type)

    # GIS queries

    def st_contains(self, value):
        db = self.db
        return Query(db, db._adapter.ST_CONTAINS, self, value)

    def st_equals(self, value):
        db = self.db
        return Query(db, db._adapter.ST_EQUALS, self, value)

    def st_intersects(self, value):
        db = self.db
        return Query(db, db._adapter.ST_INTERSECTS, self, value)

    def st_overlaps(self, value):
        db = self.db
        return Query(db, db._adapter.ST_OVERLAPS, self, value)

    def st_touches(self, value):
        db = self.db
        return Query(db, db._adapter.ST_TOUCHES, self, value)

    def st_within(self, value):
        db = self.db
        return Query(db, db._adapter.ST_WITHIN, self, value)
9322
# for use in both Query and sortby


class SQLCustomType(object):
    """
    allows defining of custom SQL types

    Example::

        decimal = SQLCustomType(
            type ='double',
            native ='integer',
            encoder =(lambda x: int(float(x) * 100)),
            decoder = (lambda x: Decimal("0.00") + Decimal(str(float(x)/100)) )
            )

        db.define_table(
            'example',
            Field('value', type=decimal)
            )

    :param type: the web2py type (default = 'string')
    :param native: the backend type
    :param encoder: how to encode the value to store it in the backend
    :param decoder: how to decode the value retrieved from the backend
    :param validator: what validators to use ( default = None, will use the
        default validator for type)
    """

    def __init__(
        self,
        type='string',
        native=None,
        encoder=None,
        decoder=None,
        validator=None,
        _class=None,
        ):
        self.type = type
        self.native = native
        # default to the identity so callers can always invoke
        # encoder/decoder without checking for None
        self.encoder = encoder or (lambda x: x)
        self.decoder = decoder or (lambda x: x)
        self.validator = validator
        self._class = _class or type

    def startswith(self, text=None):
        """Delegate to the underlying web2py type string so code that
        probes field types (e.g. ``field.type.startswith('list:')``)
        works for custom types too; returns False when the prefix
        test cannot be performed.

        BUG FIX: the original called ``self.type.startswith(self, text)``,
        passing the instance itself as the prefix, which always raised
        TypeError and therefore always returned False.
        """
        try:
            return self.type.startswith(text)
        except TypeError:
            return False

    def __getslice__(self, a=0, b=100):
        # Slicing a custom type has no meaning; return None
        # (Python 2 slice protocol).
        return None

    def __getitem__(self, i):
        # Indexing a custom type has no meaning either.
        return None

    def __str__(self):
        # render as the user-facing class name (defaults to the type)
        return self._class
class FieldVirtual(object):
    """A computed (virtual) field: ``f`` is the callable producing the
    value; the field is readable but never written back to the db."""

    def __init__(self, name, f=None, ftype='string', label=None,
                 table_name=None):
        # Backward compatibility: historically the callable was passed
        # as the sole positional argument, with no name.
        if f:
            self.name, self.f = name, f
        else:
            self.name, self.f = 'unknown', name
        self.type = ftype
        self.label = label or self.name.capitalize().replace('_', ' ')
        self.represent = lambda value, row: value
        self.formatter = IDENTITY
        self.comment = None
        self.readable = True
        self.writable = False
        self.requires = None
        self.widget = None
        self.tablename = table_name
        self.filter_out = None

    def __str__(self):
        return '%s.%s' % (self.tablename, self.name)
9401
class FieldMethod(object):
    """A method-like field attached to a table: calling it runs ``f``
    through the optional ``handler``."""

    def __init__(self, name, f=None, handler=None):
        # Backward compatibility: allow the callable as the sole
        # positional argument, in which case the name is 'unknown'.
        if f:
            self.name, self.f = name, f
        else:
            self.name, self.f = 'unknown', name
        self.handler = handler
9407
def list_represent(x, r=None):
    """Default represent for list: fields -- comma-join the items.

    ``r`` (the row) is accepted for the standard represent signature
    but is unused.  A None/empty value renders as the empty string.
    """
    items = x or []
    return ', '.join(str(item) for item in items)
9410
class Field(Expression):

    Virtual = FieldVirtual
    Method = FieldMethod
    Lazy = FieldMethod # for backward compatibility

    """
    an instance of this class represents a database field

    example::

        a = Field(name, 'string', length=32, default=None, required=False,
            requires=IS_NOT_EMPTY(), ondelete='CASCADE',
            notnull=False, unique=False,
            uploadfield=True, widget=None, label=None, comment=None,
            uploadfield=True, # True means store on disk,
                              # 'a_field_name' means store in this field in db
                              # False means file content will be discarded.
            writable=True, readable=True, update=None, authorize=None,
            autodelete=False, represent=None, uploadfolder=None,
            uploadseparate=False # upload to separate directories by uuid_keys
                                 # first 2 character and tablename.fieldname
                                 # False - old behavior
                                 # True - put uploaded file in
                                 #   <uploaddir>/<tablename>.<fieldname>/uuid_key[:2]
                                 #        directory)
            uploadfs=None     # a pyfilesystem where to store upload

    to be used as argument of DAL.define_table

    allowed field types:
    string, boolean, integer, double, text, blob,
    date, time, datetime, upload, password

    """

    def __init__(
        self,
        fieldname,
        type='string',
        length=None,
        default=DEFAULT,
        required=False,
        requires=DEFAULT,
        ondelete='CASCADE',
        notnull=False,
        unique=False,
        uploadfield=True,
        widget=None,
        label=None,
        comment=None,
        writable=True,
        readable=True,
        update=None,
        authorize=None,
        autodelete=False,
        represent=None,
        uploadfolder=None,
        uploadseparate=False,
        uploadfs=None,
        compute=None,
        custom_store=None,
        custom_retrieve=None,
        custom_retrieve_file_properties=None,
        custom_delete=None,
        filter_in = None,
        filter_out = None,
        custom_qualifier = None,
        map_none = None,
        ):
        """Validate the field name and store all field attributes.

        Raises SyntaxError for names that are not plain strings, shadow
        Table attributes, start with '_' or are Python keywords.
        """
        self._db = self.db = None # both for backward compatibility
        self.op = None
        self.first = None
        self.second = None
        # field names must be byte strings (Python 2)
        if isinstance(fieldname, unicode):
            try:
                fieldname = str(fieldname)
            except UnicodeEncodeError:
                raise SyntaxError('Field: invalid unicode field name')
        self.name = fieldname = cleanup(fieldname)
        if not isinstance(fieldname, str) or hasattr(Table, fieldname) or \
                fieldname[0] == '_' or REGEX_PYTHON_KEYWORDS.match(fieldname):
            raise SyntaxError('Field: invalid field name: %s' % fieldname)
        # passing a Table/Field as `type` means "reference that table"
        self.type = type if not isinstance(type, (Table,Field)) else 'reference %s' % type
        self.length = length if not length is None else DEFAULTLENGTH.get(self.type,512)
        # no explicit default: reuse `update` (or None)
        self.default = default if default!=DEFAULT else (update or None)
        self.required = required # is this field required
        self.ondelete = ondelete.upper() # this is for reference fields only
        self.notnull = notnull
        self.unique = unique
        self.uploadfield = uploadfield
        self.uploadfolder = uploadfolder
        self.uploadseparate = uploadseparate
        self.uploadfs = uploadfs
        self.widget = widget
        self.comment = comment
        self.writable = writable
        self.readable = readable
        self.update = update
        self.authorize = authorize
        self.autodelete = autodelete
        # list: types get a comma-join default represent
        self.represent = list_represent if \
            represent==None and type in ('list:integer','list:string') else represent
        self.compute = compute
        self.isattachment = True
        self.custom_store = custom_store
        self.custom_retrieve = custom_retrieve
        self.custom_retrieve_file_properties = custom_retrieve_file_properties
        self.custom_delete = custom_delete
        self.filter_in = filter_in
        self.filter_out = filter_out
        self.custom_qualifier = custom_qualifier
        self.label = label if label!=None else fieldname.replace('_',' ').title()
        self.requires = requires if requires!=None else []
        self.map_none = map_none

    def set_attributes(self,*args,**attributes):
        """Bulk-update this field's attributes in place."""
        self.__dict__.update(*args,**attributes)

    def clone(self,point_self_references_to=False,**args):
        """Return a shallow copy of this field, overriding attributes
        via ``args``; when ``point_self_references_to`` is given, a
        self-referencing field is retargeted to that table.

        BUG FIX: the original compared ``field.type`` against
        ``'reference %s'+field._tablename`` (string concatenation,
        i.e. 'reference %stablename'), so self-references were never
        detected; the comparison now uses %-interpolation.
        """
        field = copy.copy(self)
        if point_self_references_to and \
                field.type == 'reference %s' % field._tablename:
            field.type = 'reference %s' % point_self_references_to
        field.__dict__.update(args)
        return field

    def store(self, file, filename=None, path=None):
        """Store an uploaded file and return the encoded new filename.

        The generated name embeds table, field, a uuid key and the
        b16-encoded original filename, truncated so that with its
        extension it fits in self.length.
        """
        if self.custom_store:
            return self.custom_store(file,filename,path)
        if isinstance(file, cgi.FieldStorage):
            filename = filename or file.filename
            file = file.file
        elif not filename:
            filename = file.name
        filename = os.path.basename(filename.replace('/', os.sep)\
                                        .replace('\\', os.sep))
        m = REGEX_STORE_PATTERN.search(filename)
        extension = m and m.group('e') or 'txt'
        uuid_key = web2py_uuid().replace('-', '')[-16:]
        encoded_filename = base64.b16encode(filename).lower()
        newfilename = '%s.%s.%s.%s' % \
            (self._tablename, self.name, uuid_key, encoded_filename)
        newfilename = newfilename[:(self.length - 1 - len(extension))] + '.' + extension
        self_uploadfield = self.uploadfield
        if isinstance(self_uploadfield,Field):
            # content goes into another table's blob field
            blob_uploadfield_name = self_uploadfield.uploadfield
            keys={self_uploadfield.name: newfilename,
                  blob_uploadfield_name: file.read()}
            self_uploadfield.table.insert(**keys)
        elif self_uploadfield == True:
            # content goes on disk (or on a pyfilesystem)
            if path:
                pass
            elif self.uploadfolder:
                path = self.uploadfolder
            elif self.db._adapter.folder:
                path = pjoin(self.db._adapter.folder, '..', 'uploads')
            else:
                raise RuntimeError(
                    "you must specify a Field(...,uploadfolder=...)")
            if self.uploadseparate:
                if self.uploadfs:
                    raise RuntimeError("not supported")
                path = pjoin(path,"%s.%s" %(self._tablename, self.name),
                             uuid_key[:2])
            if not exists(path):
                os.makedirs(path)
            pathfilename = pjoin(path, newfilename)
            if self.uploadfs:
                dest_file = self.uploadfs.open(newfilename, 'wb')
            else:
                dest_file = open(pathfilename, 'wb')
            try:
                shutil.copyfileobj(file, dest_file)
            except IOError:
                raise IOError(
                    'Unable to store file "%s" because invalid permissions, readonly file system, or filename too long' % pathfilename)
            dest_file.close()
        return newfilename

    def retrieve(self, name, path=None, nameonly=False):
        """
        if nameonly==True return (filename, fullfilename) instead of
        (filename, stream)
        """
        self_uploadfield = self.uploadfield
        if self.custom_retrieve:
            return self.custom_retrieve(name, path)
        import http
        if self.authorize or isinstance(self_uploadfield, str):
            row = self.db(self == name).select().first()
            if not row:
                raise http.HTTP(404)
            if self.authorize and not self.authorize(row):
                raise http.HTTP(403)
        file_properties = self.retrieve_file_properties(name,path)
        filename = file_properties['filename']
        if isinstance(self_uploadfield, str):  # ## if file is in DB
            stream = StringIO.StringIO(row[self_uploadfield] or '')
        elif isinstance(self_uploadfield,Field):
            blob_uploadfield_name = self_uploadfield.uploadfield
            query = self_uploadfield == name
            data = self_uploadfield.table(query)[blob_uploadfield_name]
            stream = StringIO.StringIO(data)
        elif self.uploadfs:
            # ## if file is on pyfilesystem
            stream = self.uploadfs.open(name, 'rb')
        else:
            # ## if file is on regular filesystem
            # this is intentionally a string with filename and not a stream
            # this propagates and allows stream_file_or_304_or_206 to be called
            fullname = pjoin(file_properties['path'],name)
            if nameonly:
                return (filename, fullname)
            stream = open(fullname,'rb')
        return (filename, stream)

    def retrieve_file_properties(self, name, path=None):
        """Decode the stored upload name back into the original filename
        and the directory it lives in; path is None for DB-stored files.
        """
        m = REGEX_UPLOAD_PATTERN.match(name)
        if not m or not self.isattachment:
            raise TypeError('Can\'t retrieve %s file properties' % name)
        self_uploadfield = self.uploadfield
        if self.custom_retrieve_file_properties:
            return self.custom_retrieve_file_properties(name, path)
        if m.group('name'):
            try:
                filename = base64.b16decode(m.group('name'), True)
                filename = REGEX_CLEANUP_FN.sub('_', filename)
            except (TypeError, AttributeError):
                filename = name
        else:
            filename = name
        # ## if file is in DB
        if isinstance(self_uploadfield, (str, Field)):
            return dict(path=None,filename=filename)
        # ## if file is on filesystem
        if not path:
            if self.uploadfolder:
                path = self.uploadfolder
            else:
                path = pjoin(self.db._adapter.folder, '..', 'uploads')
        if self.uploadseparate:
            t = m.group('table')
            f = m.group('field')
            u = m.group('uuidkey')
            path = pjoin(path,"%s.%s" % (t,f),u[:2])
        return dict(path=path,filename=filename)

    def formatter(self, value):
        """Run the value through the formatters of this field's
        validators, in reverse order (the inverse of validation)."""
        requires = self.requires
        if value is None or not requires:
            return value or self.map_none
        if not isinstance(requires, (list, tuple)):
            requires = [requires]
        elif isinstance(requires, tuple):
            requires = list(requires)
        else:
            requires = copy.copy(requires)
        requires.reverse()
        for item in requires:
            if hasattr(item, 'formatter'):
                value = item.formatter(value)
        return value

    def validate(self, value):
        """Run the value through this field's validators.

        Returns (value, None) on success or (value, error) at the first
        failing validator; map_none values are normalized to None.
        """
        if not self.requires or self.requires == DEFAULT:
            return ((value if value!=self.map_none else None), None)
        requires = self.requires
        if not isinstance(requires, (list, tuple)):
            requires = [requires]
        for validator in requires:
            (value, error) = validator(value)
            if error:
                return (value, error)
        return ((value if value!=self.map_none else None), None)

    def count(self, distinct=None):
        """Return a COUNT expression over this field."""
        return Expression(self.db, self.db._adapter.COUNT, self, distinct, 'integer')

    def as_dict(self, flat=False, sanitize=True):
        """Return the field attributes as a plain dict.

        With sanitize=True an unreadable+unwritable field yields an
        empty dict; with flat=True non-serializable values are reduced
        to str/None.
        """
        attrs = ("name", 'authorize', 'represent', 'ondelete',
                 'custom_store', 'autodelete', 'custom_retrieve',
                 'filter_out', 'uploadseparate', 'widget', 'uploadfs',
                 'update', 'custom_delete', 'uploadfield', 'uploadfolder',
                 'custom_qualifier', 'unique', 'writable', 'compute',
                 'map_none', 'default', 'type', 'required', 'readable',
                 'requires', 'comment', 'label', 'length', 'notnull',
                 'custom_retrieve_file_properties', 'filter_in')
        serializable = (int, long, basestring, float, tuple,
                        bool, type(None))

        def flatten(obj):
            # recursively reduce to JSON-friendly primitives
            if isinstance(obj, dict):
                return dict((flatten(k), flatten(v)) for k, v in
                            obj.items())
            elif isinstance(obj, (tuple, list, set)):
                return [flatten(v) for v in obj]
            elif isinstance(obj, serializable):
                return obj
            elif isinstance(obj, (datetime.datetime,
                                  datetime.date, datetime.time)):
                return str(obj)
            else:
                return None

        d = dict()
        if not (sanitize and not (self.readable or self.writable)):
            for attr in attrs:
                if flat:
                    d.update({attr: flatten(getattr(self, attr))})
                else:
                    d.update({attr: getattr(self, attr)})
            d["fieldname"] = d.pop("name")
        return d

    def as_xml(self, sanitize=True):
        """Serialize as_dict(flat=True) to XML (requires serializers)."""
        if have_serializers:
            xml = serializers.xml
        else:
            raise ImportError("No xml serializers available")
        d = self.as_dict(flat=True, sanitize=sanitize)
        return xml(d)

    def as_json(self, sanitize=True):
        """Serialize as_dict(flat=True) to JSON (requires serializers)."""
        if have_serializers:
            json = serializers.json
        else:
            raise ImportError("No json serializers available")
        d = self.as_dict(flat=True, sanitize=sanitize)
        return json(d)

    def as_yaml(self, sanitize=True):
        """Serialize as_dict(flat=True) to YAML (requires serializers)."""
        if have_serializers:
            d = self.as_dict(flat=True, sanitize=sanitize)
            return serializers.yaml(d)
        else:
            raise ImportError("No YAML serializers available")

    def __nonzero__(self):
        # a Field is always truthy, even when comparisons are overloaded
        return True

    def __str__(self):
        try:
            return '%s.%s' % (self.tablename, self.name)
        except:
            return '<no table>.%s' % self.name
9758
class Query(object):

    """
    a query object necessary to define a set.
    it can be stored or can be passed to DAL.__call__() to obtain a Set

    Example::

        query = db.users.name=='Max'
        set = db(query)
        records = set.select()

    """

    def __init__(
        self,
        db,
        op,
        first=None,
        second=None,
        ignore_common_filters = False,
        **optional_args
        ):
        # op is an adapter operator (e.g. db._adapter.EQ); first/second
        # are its operands (Fields, Expressions, nested Queries, values)
        self.db = self._db = db
        self.op = op
        self.first = first
        self.second = second
        self.ignore_common_filters = ignore_common_filters
        self.optional_args = optional_args

    def __repr__(self):
        # unbound BaseAdapter.expand call renders the query as SQL text
        return '<Query %s>' % BaseAdapter.expand(self.db._adapter,self)

    def __str__(self):
        # the SQL fragment this query expands to for the current adapter
        return self.db._adapter.expand(self)

    def __and__(self, other):
        # query1 & query2 -> AND
        return Query(self.db,self.db._adapter.AND,self,other)

    __rand__ = __and__

    def __or__(self, other):
        # query1 | query2 -> OR
        return Query(self.db,self.db._adapter.OR,self,other)

    __ror__ = __or__

    def __invert__(self):
        # ~(~query) collapses back to the original query
        if self.op==self.db._adapter.NOT:
            return self.first
        return Query(self.db,self.db._adapter.NOT,self)

    def __eq__(self, other):
        # queries compare equal when their SQL expansions match
        return repr(self) == repr(other)

    def __ne__(self, other):
        return not (self == other)

    def case(self,t=1,f=0):
        # CASE WHEN <self> THEN t ELSE f END
        return self.db._adapter.CASE(self,t,f)

    def as_dict(self, flat=False, sanitize=True):
        """Experimental stuff

        This allows to return a plain dictionary with the basic
        query representation. Can be used with json/xml services
        for client-side db I/O

        Example:
        >>> q = db.auth_user.id != 0
        >>> q.as_dict(flat=True)
        {"op": "NE", "first":{"tablename": "auth_user",
                              "fieldname": "id"},
                     "second":0}
        """

        SERIALIZABLE_TYPES = (tuple, dict, set, list, int, long, float,
                              basestring, type(None), bool)
        def loop(d):
            # recursively flatten the query tree into plain dicts;
            # Fields become {"tablename":..., "fieldname":...},
            # adapter operators become their __name__
            newd = dict()
            for k, v in d.items():
                if k in ("first", "second"):
                    if isinstance(v, self.__class__):
                        newd[k] = loop(v.__dict__)
                    elif isinstance(v, Field):
                        newd[k] = {"tablename": v._tablename,
                                   "fieldname": v.name}
                    elif isinstance(v, Expression):
                        newd[k] = loop(v.__dict__)
                    elif isinstance(v, SERIALIZABLE_TYPES):
                        newd[k] = v
                    elif isinstance(v, (datetime.date,
                                        datetime.time,
                                        datetime.datetime)):
                        newd[k] = unicode(v)
                elif k == "op":
                    if callable(v):
                        newd[k] = v.__name__
                    elif isinstance(v, basestring):
                        newd[k] = v
                    else: pass # not callable or string
                elif isinstance(v, SERIALIZABLE_TYPES):
                    if isinstance(v, dict):
                        newd[k] = loop(v)
                    else: newd[k] = v
            return newd

        if flat:
            return loop(self.__dict__)
        else: return self.__dict__

    def as_xml(self, sanitize=True):
        # serialize the flat dict form to XML (requires serializers)
        if have_serializers:
            xml = serializers.xml
        else:
            raise ImportError("No xml serializers available")
        d = self.as_dict(flat=True, sanitize=sanitize)
        return xml(d)

    def as_json(self, sanitize=True):
        # serialize the flat dict form to JSON (requires serializers)
        if have_serializers:
            json = serializers.json
        else:
            raise ImportError("No json serializers available")
        d = self.as_dict(flat=True, sanitize=sanitize)
        return json(d)
9886
def xorify(orderby):
    """Combine a sequence of expressions with the | operator, left to
    right; returns None for an empty or missing sequence."""
    if not orderby:
        return None
    combined = orderby[0]
    for expr in orderby[1:]:
        combined = combined | expr
    return combined
9894
def use_common_filters(query):
    """Whether common filters should be applied for ``query``.

    Note: deliberately preserves the short-circuit return value
    (a falsy ``query`` is returned as-is, not coerced to False) --
    Set.__init__ compares this result with ``==``.
    """
    has_flag = hasattr(query, 'ignore_common_filters')
    return query and has_flag and not query.ignore_common_filters
9898
class Set(object):

    """
    a Set represents a set of records in the database,
    the records are identified by the query=Query(...) object.
    normally the Set is generated by DAL.__call__(Query(...))

    given a set, for example
    set = db(db.users.name=='Max')
    you can:
    set.update(db.users.name='Massimo')
    set.delete() # all elements in the set
    set.select(orderby=db.users.id, groupby=db.users.name, limitby=(0,10))
    and take subsets:
    subset = set(db.users.id<5)
    """

    def __init__(self, db, query, ignore_common_filters = None):
        self.db = db
        self._db = db # for backward compatibility
        self.dquery = None

        # if query is a dict, parse it
        if isinstance(query, dict):
            query = self.parse(query)

        # if the caller forces a common-filter behavior that differs
        # from the query's current one, copy the query first so the
        # caller's Query object is left untouched
        if not ignore_common_filters is None and \
                use_common_filters(query) == ignore_common_filters:
            query = copy.copy(query)
            query.ignore_common_filters = ignore_common_filters
        self.query = query

    def __repr__(self):
        # unbound BaseAdapter.expand renders the query as SQL text
        return '<Set %s>' % BaseAdapter.expand(self.db._adapter,self.query)

    def __call__(self, query, ignore_common_filters=False):
        """Return a (sub)Set; accepts a Query, Table, Field or raw SQL
        string and ANDs it with this set's query when present."""
        if query is None:
            return self
        elif isinstance(query,Table):
            query = self.db._adapter.id_query(query)
        elif isinstance(query,str):
            query = Expression(self.db,query)
        elif isinstance(query,Field):
            query = query!=None
        if self.query:
            return Set(self.db, self.query & query,
                       ignore_common_filters=ignore_common_filters)
        else:
            return Set(self.db, query,
                       ignore_common_filters=ignore_common_filters)

    def _count(self,distinct=None):
        # SQL for count(), without executing it
        return self.db._adapter._count(self.query,distinct)

    def _select(self, *fields, **attributes):
        # SQL for select(), without executing it
        adapter = self.db._adapter
        tablenames = adapter.tables(self.query,
                                    attributes.get('join',None),
                                    attributes.get('left',None),
                                    attributes.get('orderby',None),
                                    attributes.get('groupby',None))
        fields = adapter.expand_all(fields, tablenames)
        return adapter._select(self.query,fields,attributes)

    def _delete(self):
        # SQL for delete(), without executing it
        db = self.db
        tablename = db._adapter.get_table(self.query)
        return db._adapter._delete(tablename,self.query)

    def _update(self, **update_fields):
        # SQL for update(), without executing it
        db = self.db
        tablename = db._adapter.get_table(self.query)
        fields = db[tablename]._listify(update_fields,update=True)
        return db._adapter._update(tablename,self.query,fields)

    def as_dict(self, flat=False, sanitize=True):
        """Plain-dict representation of this Set (query + db info);
        with sanitize=True the db identifiers are omitted."""
        if flat:
            uid = dbname = uri = None
            codec = self.db._db_codec
            if not sanitize:
                uri, dbname, uid = (self.db._dbname, str(self.db),
                                    self.db._db_uid)
            d = {"query": self.query.as_dict(flat=flat)}
            d["db"] = {"uid": uid, "codec": codec,
                       "name": dbname, "uri": uri}
            return d
        else: return self.__dict__

    def as_xml(self, sanitize=True):
        # serialize the flat dict form to XML (requires serializers)
        if have_serializers:
            xml = serializers.xml
        else:
            raise ImportError("No xml serializers available")
        d = self.as_dict(flat=True, sanitize=sanitize)
        return xml(d)

    def as_json(self, sanitize=True):
        # serialize the flat dict form to JSON (requires serializers)
        if have_serializers:
            json = serializers.json
        else:
            raise ImportError("No json serializers available")
        d = self.as_dict(flat=True, sanitize=sanitize)
        return json(d)

    def parse(self, dquery):
        "Experimental: Turn a dictionary into a Query object"
        self.dquery = dquery
        return self.build(self.dquery)

    def build(self, d):
        "Experimental: see .parse()"
        # d is {"op": ..., "first": ..., "second": ...} as produced by
        # Query.as_dict(flat=True); recurse on nested dicts
        op, first, second = (d["op"], d["first"],
                             d.get("second", None))
        left = right = built = None

        if op in ("AND", "OR"):
            if not (type(first), type(second)) == (dict, dict):
                raise SyntaxError("Invalid AND/OR query")
            if op == "AND":
                built = self.build(first) & self.build(second)
            else: built = self.build(first) | self.build(second)

        elif op == "NOT":
            if first is None:
                raise SyntaxError("Invalid NOT query")
            built = ~self.build(first)
        else:
            # normal operation (GT, EQ, LT, ...)
            for k, v in {"left": first, "right": second}.items():
                if isinstance(v, dict) and v.get("op"):
                    v = self.build(v)
                if isinstance(v, dict) and ("tablename" in v):
                    # {"tablename":..., "fieldname":...} -> Field object
                    v = self.db[v["tablename"]][v["fieldname"]]
                if k == "left": left = v
                else: right = v

            if hasattr(self.db._adapter, op):
                opm = getattr(self.db._adapter, op)

            if op == "EQ": built = left == right
            elif op == "NE": built = left != right
            elif op == "GT": built = left > right
            elif op == "GE": built = left >= right
            elif op == "LT": built = left < right
            elif op == "LE": built = left <= right
            elif op in ("JOIN", "LEFT_JOIN", "RANDOM", "ALLOW_NULL"):
                built = Expression(self.db, opm)
            elif op in ("LOWER", "UPPER", "EPOCH", "PRIMARY_KEY",
                        "COALESCE_ZERO", "RAW", "INVERT"):
                built = Expression(self.db, opm, left)
            elif op in ("COUNT", "EXTRACT", "AGGREGATE", "SUBSTRING",
                        "REGEXP", "LIKE", "ILIKE", "STARTSWITH",
                        "ENDSWITH", "ADD", "SUB", "MUL", "DIV",
                        "MOD", "AS", "ON", "COMMA", "NOT_NULL",
                        "COALESCE", "CONTAINS", "BELONGS"):
                built = Expression(self.db, opm, left, right)
            # expression as string
            elif not (left or right): built = Expression(self.db, op)
            else:
                raise SyntaxError("Operator not supported: %s" % op)

        return built

    def isempty(self):
        # cheapest possible emptiness probe: select at most one row
        return not self.select(limitby=(0,1), orderby_on_limitby=False)

    def count(self,distinct=None, cache=None):
        """Count the records in the set; cache=(cache_model, time_expire)
        memoizes the result keyed on the generated SQL."""
        db = self.db
        if cache:
            cache_model, time_expire = cache
            sql = self._count(distinct=distinct)
            key = db._uri + '/' + sql
            # keep cache keys short
            if len(key)>200: key = hashlib_md5(key).hexdigest()
            return cache_model(
                key,
                (lambda self=self,distinct=distinct: \
                        db._adapter.count(self.query,distinct)),
                time_expire)
        return db._adapter.count(self.query,distinct)

    def select(self, *fields, **attributes):
        """Execute the select and return a Rows object."""
        adapter = self.db._adapter
        tablenames = adapter.tables(self.query,
                                    attributes.get('join',None),
                                    attributes.get('left',None),
                                    attributes.get('orderby',None),
                                    attributes.get('groupby',None))
        fields = adapter.expand_all(fields, tablenames)
        return adapter.select(self.query,fields,attributes)

    def nested_select(self,*fields,**attributes):
        # wrap the select SQL so it can be embedded in another query
        return Expression(self.db,self._select(*fields,**attributes))

    def delete(self):
        """Delete all records in the set; runs _before_delete /
        _after_delete callbacks, returns the number deleted (0 when a
        before-callback vetoes)."""
        db = self.db
        tablename = db._adapter.get_table(self.query)
        table = db[tablename]
        if any(f(self) for f in table._before_delete): return 0
        ret = db._adapter.delete(tablename,self.query)
        ret and [f(self) for f in table._after_delete]
        return ret

    def update(self, **update_fields):
        """Update all records in the set; runs _before_update /
        _after_update callbacks, returns the number updated (0 when a
        before-callback vetoes)."""
        db = self.db
        tablename = db._adapter.get_table(self.query)
        table = db[tablename]
        table._attempt_upload(update_fields)
        if any(f(self,update_fields) for f in table._before_update):
            return 0
        fields = table._listify(update_fields,update=True)
        if not fields:
            raise SyntaxError("No fields to update")
        ret = db._adapter.update("%s" % table,self.query,fields)
        ret and [f(self,update_fields) for f in table._after_update]
        return ret

    def update_naive(self, **update_fields):
        """
        same as update but does not call table._before_update and _after_update
        """
        tablename = self.db._adapter.get_table(self.query)
        table = self.db[tablename]
        fields = table._listify(update_fields,update=True)
        if not fields: raise SyntaxError("No fields to update")

        ret = self.db._adapter.update("%s" % table,self.query,fields)
        return ret

    def validate_and_update(self, **update_fields):
        """Validate each value through its field's validators, then
        update; returns a Row with .errors and .updated."""
        tablename = self.db._adapter.get_table(self.query)
        response = Row()
        response.errors = Row()
        new_fields = copy.copy(update_fields)
        for key,value in update_fields.iteritems():
            value,error = self.db[tablename][key].validate(value)
            if error:
                response.errors[key] = error
            else:
                new_fields[key] = value
        table = self.db[tablename]
        if response.errors:
            response.updated = None
        else:
            if not any(f(self,new_fields) for f in table._before_update):
                fields = table._listify(new_fields,update=True)
                if not fields: raise SyntaxError("No fields to update")
                ret = self.db._adapter.update(tablename,self.query,fields)
                ret and [f(self,new_fields) for f in table._after_update]
            else:
                ret = 0
            response.updated = ret
        return response

    def delete_uploaded_files(self, upload_fields=None):
        """Remove files referenced by upload fields of the records in
        this set (for autodelete); always returns False."""
        table = self.db[self.db._adapter.tables(self.query)[0]]
        # ## mind uploadfield==True means file is not in DB
        if upload_fields:
            fields = upload_fields.keys()
        else:
            fields = table.fields
        fields = [f for f in fields if table[f].type == 'upload'
                  and table[f].uploadfield == True
                  and table[f].autodelete]
        if not fields:
            return False
        for record in self.select(*[table[f] for f in fields]):
            for fieldname in fields:
                field = table[fieldname]
                oldname = record.get(fieldname, None)
                if not oldname:
                    continue
                # skip files that are being kept (same name in the update)
                if upload_fields and oldname == upload_fields[fieldname]:
                    continue
                if field.custom_delete:
                    field.custom_delete(oldname)
                else:
                    uploadfolder = field.uploadfolder
                    if not uploadfolder:
                        uploadfolder = pjoin(
                            self.db._adapter.folder, '..', 'uploads')
                    if field.uploadseparate:
                        # stored name is tablename.fieldname.uuidkey...
                        items = oldname.split('.')
                        uploadfolder = pjoin(
                            uploadfolder,
                            "%s.%s" % (items[0], items[1]),
                            items[2][:2])
                    oldpath = pjoin(uploadfolder, oldname)
                    if exists(oldpath):
                        os.unlink(oldpath)
        return False
10189
class RecordUpdater(object):
    """Callable bound to a row (``row.update_record``) that persists
    changed columns back to the database and refreshes the row."""

    def __init__(self, colset, table, id):
        self.colset = colset
        self.db = table._db
        self.tablename = table._tablename
        self.id = id

    def __call__(self, **fields):
        table = self.db[self.tablename]
        # no explicit fields means "write back the whole row"
        newfields = fields or dict(self.colset)
        # drop anything that is not an updatable column of the table
        for fieldname in newfields.keys():
            if fieldname not in table.fields or table[fieldname].type == 'id':
                del newfields[fieldname]
        query = table._id == self.id
        table._db(query, ignore_common_filters=True).update(**newfields)
        self.colset.update(newfields)
        return self.colset
10205
class RecordDeleter(object):
    """Callable bound to a row (``row.delete_record``) that deletes the
    record by its id."""

    def __init__(self, table, id):
        self.db = table._db
        self.tablename = table._tablename
        self.id = id

    def __call__(self):
        table = self.db[self.tablename]
        return self.db(table._id == self.id).delete()
10211
class LazyReferenceGetter(object):
    """Lazily resolve ``row.other_table`` into the LazySet of records in
    ``other_table`` that reference this record."""

    def __init__(self, table, id):
        self.db = table._db
        self.tablename = table._tablename
        self.id = id

    def __call__(self, other_tablename):
        # only available when the DAL was created with lazy_tables
        if self.db._lazy_tables is False:
            raise AttributeError()
        table = self.db[self.tablename]
        other_table = self.db[other_tablename]
        for rfield in table._referenced_by:
            if rfield.table == other_table:
                return LazySet(rfield, self.id)
        # no field of other_table references this table
        raise AttributeError()
10225
class LazySet(object):
    """A Set over the records of one table whose ``fieldname`` column
    references record ``id``, materialized only when actually used."""

    def __init__(self, field, id):
        self.db = field.db
        self.tablename = field._tablename
        self.fieldname = field.name
        self.id = id

    def _getset(self):
        # build the underlying Set: tablename.fieldname == id
        field = self.db[self.tablename][self.fieldname]
        return Set(self.db, field == self.id)

    def __repr__(self):
        return repr(self._getset())

    def __call__(self, query, ignore_common_filters=False):
        inner = self._getset()
        return inner(query, ignore_common_filters)

    def _count(self, distinct=None):
        inner = self._getset()
        return inner._count(distinct)

    def _select(self, *fields, **attributes):
        inner = self._getset()
        return inner._select(*fields, **attributes)

    def _delete(self):
        inner = self._getset()
        return inner._delete()

    def _update(self, **update_fields):
        inner = self._getset()
        return inner._update(**update_fields)

    def isempty(self):
        inner = self._getset()
        return inner.isempty()

    def count(self, distinct=None, cache=None):
        inner = self._getset()
        return inner.count(distinct, cache)

    def select(self, *fields, **attributes):
        inner = self._getset()
        return inner.select(*fields, **attributes)

    def nested_select(self, *fields, **attributes):
        inner = self._getset()
        return inner.nested_select(*fields, **attributes)

    def delete(self):
        inner = self._getset()
        return inner.delete()

    def update(self, **update_fields):
        inner = self._getset()
        return inner.update(**update_fields)

    def update_naive(self, **update_fields):
        inner = self._getset()
        return inner.update_naive(**update_fields)

    def validate_and_update(self, **update_fields):
        inner = self._getset()
        return inner.validate_and_update(**update_fields)

    def delete_uploaded_files(self, upload_fields=None):
        inner = self._getset()
        return inner.delete_uploaded_files(upload_fields)
10263
class VirtualCommand(object):
    """
    Binds a lazy virtual-field method to a specific row: calling the
    instance invokes the stored method with that row as first argument,
    forwarding any extra positional/keyword arguments.
    """

    def __init__(self, method, row):
        self.method = method
        self.row = row

    def __call__(self, *args, **kwargs):
        bound = self.method
        return bound(self.row, *args, **kwargs)
10270
def lazy_virtualfield(f):
    """
    Decorator that marks a virtual-field method as lazy.

    Rows.setvirtualfields checks for the ``__lazy__`` attribute and wraps
    marked methods in a VirtualCommand instead of evaluating them eagerly.
    """
    setattr(f, '__lazy__', True)
    return f
10274
class Rows(object):

    """
    A wrapper for the return value of a select. It basically represents a table.
    It has an iterator and each row is represented as a dictionary.
    """

    # ## TODO: this class still needs some work to care for ID/OID

    def __init__(
        self,
        db=None,
        records=[],
        colnames=[],
        compact=True,
        rawrows=None
        ):
        # NOTE(review): mutable default arguments ([]) are shared between
        # calls; safe only while neither Rows nor callers mutate the
        # defaults in place.
        self.db = db
        self.records = records      # list of Row objects
        self.colnames = colnames    # column names as 'table.field' strings
        self.compact = compact      # if True, single-table rows unwrap on access
        self.response = rawrows     # raw driver response, when provided

    def __repr__(self):
        # short debug form showing only the record count
        return '<Rows (%s)>' % len(self.records)

    # attach virtual fields to every record; methods decorated with
    # @lazy_virtualfield become VirtualCommand wrappers, plain methods
    # are evaluated immediately against the row
    def setvirtualfields(self,**keyed_virtualfields):
        """
        db.define_table('x',Field('number','integer'))
        if db(db.x).isempty(): [db.x.insert(number=i) for i in range(10)]

        from gluon.dal import lazy_virtualfield

        class MyVirtualFields(object):
            # normal virtual field (backward compatible, discouraged)
            def normal_shift(self): return self.x.number+1
            # lazy virtual field (because of @staticmethod)
            @lazy_virtualfield
            def lazy_shift(instance,row,delta=4): return row.x.number+delta
        db.x.virtualfields.append(MyVirtualFields())

        for row in db(db.x).select():
            print row.number, row.normal_shift, row.lazy_shift(delta=7)
        """
        if not keyed_virtualfields:
            return self
        for row in self.records:
            for (tablename,virtualfields) in keyed_virtualfields.iteritems():
                attributes = dir(virtualfields)
                # the per-table sub-row that will receive the virtual values
                if not tablename in row:
                    box = row[tablename] = Row()
                else:
                    box = row[tablename]
                updated = False
                for attribute in attributes:
                    if attribute[0] != '_':  # skip private/dunder names
                        method = getattr(virtualfields,attribute)
                        if hasattr(method,'__lazy__'):
                            # lazy: defer evaluation until called
                            box[attribute]=VirtualCommand(method,row)
                        elif type(method)==types.MethodType:
                            if not updated:
                                # legacy style: copy row fields onto the
                                # virtualfields instance once per row so the
                                # method can read them via self
                                virtualfields.__dict__.update(row)
                                updated = True
                            box[attribute]=method()
        return self

    def __and__(self,other):
        # concatenation of two compatible result sets (keeps duplicates)
        if self.colnames!=other.colnames:
            raise Exception('Cannot & incompatible Rows objects')
        records = self.records+other.records
        return Rows(self.db,records,self.colnames)

    def __or__(self,other):
        # union of two compatible result sets (drops duplicate records)
        if self.colnames!=other.colnames:
            raise Exception('Cannot | incompatible Rows objects')
        records = self.records
        records += [record for record in other.records \
            if not record in records]
        return Rows(self.db,records,self.colnames)

    def __nonzero__(self):
        # truthiness: non-empty result set (Python 2 protocol)
        if len(self.records):
            return 1
        return 0

    def __len__(self):
        return len(self.records)

    def __getslice__(self, a, b):
        # slicing returns a new Rows over the sub-list (Python 2 protocol)
        return Rows(self.db,self.records[a:b],self.colnames,compact=self.compact)

    def __getitem__(self, i):
        row = self.records[i]
        keys = row.keys()
        # in compact mode a single-table row is unwrapped so fields can be
        # accessed directly (row.name instead of row.table.name)
        if self.compact and len(keys) == 1 and keys[0] != '_extra':
            return row[row.keys()[0]]
        return row

    def __iter__(self):
        """
        iterator over records
        """

        for i in xrange(len(self)):
            yield self[i]

    def __str__(self):
        """
        serializes the table into a csv file
        """

        s = StringIO.StringIO()
        self.export_to_csv_file(s)
        return s.getvalue()

    def first(self):
        # first record, or None when the set is empty
        if not self.records:
            return None
        return self[0]

    def last(self):
        # last record, or None when the set is empty
        if not self.records:
            return None
        return self[-1]

    def find(self,f,limitby=None):
        """
        returns a new Rows object, a subset of the original object,
        filtered by the function f
        """
        if not self:
            return Rows(self.db, [], self.colnames)
        records = []
        if limitby:
            a,b = limitby
        else:
            a,b = 0,len(self)
        k = 0  # counts matches so limitby applies to matching rows only
        for row in self:
            if f(row):
                if a<=k: records.append(row)
                k += 1
                if k==b: break
        return Rows(self.db, records, self.colnames)

    def exclude(self, f):
        """
        removes elements from the calling Rows object, filtered by the function f,
        and returns a new Rows object containing the removed elements
        """
        if not self.records:
            return Rows(self.db, [], self.colnames)
        removed = []
        i=0
        # index-based loop because self.records is mutated while scanning
        while i<len(self):
            row = self[i]
            if f(row):
                removed.append(self.records[i])
                del self.records[i]
            else:
                i += 1
        return Rows(self.db, removed, self.colnames)

    def sort(self, f, reverse=False):
        """
        returns a list of sorted elements (not sorted in place)
        """
        # compact=False so records keep their full (table-keyed) shape
        rows = Rows(self.db,[],self.colnames,compact=False)
        rows.records = sorted(self,key=f,reverse=reverse)
        return rows

    def group_by_value(self, *fields, **args):
        """
        regroups the rows, by one of the fields
        """
        one_result = False
        if 'one_result' in args:
            one_result = args['one_result']

        def build_fields_struct(row, fields, num, groups):
            ''' helper function:
            '''
            # past the last grouping field: emit the row (or [row])
            if num > len(fields)-1:
                if one_result:
                    return row
                else:
                    return [row]

            key = fields[num]
            value = row[key]

            if value not in groups:
                groups[value] = build_fields_struct(row, fields, num+1, {})
            else:
                struct = build_fields_struct(row, fields, num+1, groups[ value ])

                # still have more grouping to do
                if type(struct) == type(dict()):
                    # NOTE(review): update() with no args is a no-op; works
                    # because groups[value] was already mutated in place above
                    groups[value].update()
                # no more grouping, first only is off
                elif type(struct) == type(list()):
                    groups[value] += struct
                # no more grouping, first only on
                else:
                    groups[value] = struct

            return groups

        if len(fields) == 0:
            return self

        # if select returned no results
        if not self.records:
            return {}

        grouped_row_group = dict()

        # build the struct
        for row in self:
            build_fields_struct(row, fields, 0, grouped_row_group)

        return grouped_row_group

    def render(self, i=None, fields=None):
        """
        Takes an index and returns a copy of the indexed row with values
        transformed via the "represent" attributes of the associated fields.

        If no index is specified, a generator is returned for iteration
        over all the rows.

        fields -- a list of fields to transform (if None, all fields with
                  "represent" attributes will be transformed).
        """

        if i is None:
            # lazily render every row
            return (self.render(i, fields=fields) for i in range(len(self)))
        import sqlhtml
        # deep copy so represent() never mutates the stored records
        row = copy.deepcopy(self.records[i])
        keys = row.keys()
        tables = [f.tablename for f in fields] if fields \
            else [k for k in keys if k != '_extra']
        for table in tables:
            repr_fields = [f.name for f in fields if f.tablename == table] \
                if fields else [k for k in row[table].keys()
                                if (hasattr(self.db[table], k) and
                                    isinstance(self.db[table][k], Field)
                                    and self.db[table][k].represent)]
            for field in repr_fields:
                row[table][field] = sqlhtml.represent(
                    self.db[table][field], row[table][field], row[table])
        # mirror __getitem__'s compact unwrapping
        if self.compact and len(keys) == 1 and keys[0] != '_extra':
            return row[keys[0]]
        return row

    def as_list(self,
                compact=True,
                storage_to_dict=True,
                datetime_to_str=False,
                custom_types=None):
        """
        returns the data as a list or dictionary.
        :param storage_to_dict: when True returns a dict, otherwise a list(default True)
        :param datetime_to_str: convert datetime fields as strings (default False)
        """
        # temporarily override self.compact so iteration honors the argument
        (oc, self.compact) = (self.compact, compact)
        if storage_to_dict:
            items = [item.as_dict(datetime_to_str, custom_types) for item in self]
        else:
            items = [item for item in self]
        # NOTE(review): restores the argument value, not the saved oc
        self.compact = compact
        return items

    def as_dict(self,
                key='id',
                compact=True,
                storage_to_dict=True,
                datetime_to_str=False,
                custom_types=None):
        """
        returns the data as a dictionary of dictionaries (storage_to_dict=True) or records (False)

        :param key: the name of the field to be used as dict key, normally the id
        :param compact: ? (default True)
        :param storage_to_dict: when True returns a dict, otherwise a list(default True)
        :param datetime_to_str: convert datetime fields as strings (default False)
        """

        # test for multiple rows
        multi = False
        f = self.first()
        if f and isinstance(key, basestring):
            # multi-table rows nest Row objects inside the first row
            multi = any([isinstance(v, f.__class__) for v in f.values()])
            if (not "." in key) and multi:
                # No key provided, default to int indices
                def new_key():
                    i = 0
                    while True:
                        yield i
                        i += 1
                key_generator = new_key()
                key = lambda r: key_generator.next()

        rows = self.as_list(compact, storage_to_dict, datetime_to_str, custom_types)
        if isinstance(key,str) and key.count('.')==1:
            # 'table.field' style key
            (table, field) = key.split('.')
            return dict([(r[table][field],r) for r in rows])
        elif isinstance(key,str):
            # plain field-name key
            return dict([(r[key],r) for r in rows])
        else:
            # callable key
            return dict([(key(r),r) for r in rows])

    def export_to_csv_file(self, ofile, null='<NULL>', *args, **kwargs):
        """
        export data to csv, the first line contains the column names

        :param ofile: where the csv must be exported to
        :param null: how null values must be represented (default '<NULL>')
        :param delimiter: delimiter to separate values (default ',')
        :param quotechar: character to use to quote string values (default '"')
        :param quoting: quote system, use csv.QUOTE_*** (default csv.QUOTE_MINIMAL)
        :param represent: use the fields .represent value (default False)
        :param colnames: list of column names to use (default self.colnames)
            This will only work when exporting rows objects!!!!
            DO NOT use this with db.export_to_csv()
        """
        delimiter = kwargs.get('delimiter', ',')
        quotechar = kwargs.get('quotechar', '"')
        quoting = kwargs.get('quoting', csv.QUOTE_MINIMAL)
        represent = kwargs.get('represent', False)
        writer = csv.writer(ofile, delimiter=delimiter,
                            quotechar=quotechar, quoting=quoting)
        colnames = kwargs.get('colnames', self.colnames)
        write_colnames = kwargs.get('write_colnames',True)
        # a proper csv starting with the column names
        if write_colnames:
            writer.writerow(colnames)

        def none_exception(value):
            """
            returns a cleaned up value that can be used for csv export:
            - unicode text is encoded as such
            - None values are replaced with the given representation (default <NULL>)
            """
            if value is None:
                return null
            elif isinstance(value, unicode):
                return value.encode('utf8')
            elif isinstance(value,Reference):
                return long(value)
            elif hasattr(value, 'isoformat'):
                # datetime/date/time -> 'YYYY-MM-DD HH:MM:SS'
                return value.isoformat()[:19].replace('T', ' ')
            elif isinstance(value, (list,tuple)): # for type='list:..'
                return bar_encode(value)
            return value

        for record in self:
            row = []
            for col in colnames:
                if not REGEX_TABLE_DOT_FIELD.match(col):
                    # computed/extra column, not 'table.field'
                    row.append(record._extra[col])
                else:
                    (t, f) = col.split('.')
                    field = self.db[t][f]
                    if isinstance(record.get(t, None), (Row,dict)):
                        value = record[t][f]
                    else:
                        value = record[f]
                    if field.type=='blob' and not value is None:
                        value = base64.b64encode(value)
                    elif represent and field.represent:
                        value = field.represent(value)
                    row.append(none_exception(value))
            writer.writerow(row)

    def xml(self,strict=False,row_name='row',rows_name='rows'):
        """
        serializes the table using sqlhtml.SQLTABLE (if present)
        """

        if strict:
            ncols = len(self.colnames)  # NOTE(review): unused
            return '<%s>\n%s\n</%s>' % (rows_name,
                '\n'.join(row.as_xml(row_name=row_name,
                                     colnames=self.colnames) for
                          row in self), rows_name)

        import sqlhtml
        return sqlhtml.SQLTABLE(self).xml()

    def as_xml(self,row_name='row',rows_name='rows'):
        # strict XML serialization without the sqlhtml dependency
        return self.xml(strict=True, row_name=row_name, rows_name=rows_name)

    def as_json(self, mode='object', default=None):
        """
        serializes the rows to a JSON list or object with objects
        mode='object' is not implemented (should return a nested
        object structure)
        """

        items = [record.as_json(mode=mode, default=default,
                                serialize=False,
                                colnames=self.colnames) for
                 record in self]

        if have_serializers:
            return serializers.json(items,
                                    default=default or
                                    serializers.custom_json)
        elif simplejson:
            return simplejson.dumps(items)
        else:
            raise RuntimeError("missing simplejson")

    # for consistent naming yet backwards compatible
    as_csv = __str__
    json = as_json
################################################################################
# dummy function used to define some doctests
################################################################################

def test_all():
    # NOTE(review): the body is a single doctest docstring, executed by
    # doctest.testmod() when this module is run as a script; do not edit
    # the expected outputs below without re-running the doctests.
    """

    >>> if len(sys.argv)<2: db = DAL("sqlite://test.db")
    >>> if len(sys.argv)>1: db = DAL(sys.argv[1])
    >>> tmp = db.define_table('users',\
        Field('stringf', 'string', length=32, required=True),\
        Field('booleanf', 'boolean', default=False),\
        Field('passwordf', 'password', notnull=True),\
        Field('uploadf', 'upload'),\
        Field('blobf', 'blob'),\
        Field('integerf', 'integer', unique=True),\
        Field('doublef', 'double', unique=True,notnull=True),\
        Field('jsonf', 'json'),\
        Field('datef', 'date', default=datetime.date.today()),\
        Field('timef', 'time'),\
        Field('datetimef', 'datetime'),\
        migrate='test_user.table')

    Insert a field

    >>> db.users.insert(stringf='a', booleanf=True, passwordf='p', blobf='0A',\
        uploadf=None, integerf=5, doublef=3.14,\
        jsonf={"j": True},\
        datef=datetime.date(2001, 1, 1),\
        timef=datetime.time(12, 30, 15),\
        datetimef=datetime.datetime(2002, 2, 2, 12, 30, 15))
    1

    Drop the table

    >>> db.users.drop()

    Examples of insert, select, update, delete

    >>> tmp = db.define_table('person',\
        Field('name'),\
        Field('birth','date'),\
        migrate='test_person.table')
    >>> person_id = db.person.insert(name='Marco',birth='2005-06-22')
    >>> person_id = db.person.insert(name='Massimo',birth='1971-12-21')

    commented len(db().select(db.person.ALL))
    commented 2

    >>> me = db(db.person.id==person_id).select()[0] # test select
    >>> me.name
    'Massimo'
    >>> db.person[2].name
    'Massimo'
    >>> db.person(2).name
    'Massimo'
    >>> db.person(name='Massimo').name
    'Massimo'
    >>> db.person(db.person.name=='Massimo').name
    'Massimo'
    >>> row = db.person[2]
    >>> row.name == row['name'] == row['person.name'] == row('person.name')
    True
    >>> db(db.person.name=='Massimo').update(name='massimo') # test update
    1
    >>> db(db.person.name=='Marco').select().first().delete_record() # test delete
    1

    Update a single record

    >>> me.update_record(name="Max")
    <Row {'name': 'Max', 'birth': datetime.date(1971, 12, 21), 'id': 2}>
    >>> me.name
    'Max'

    Examples of complex search conditions

    >>> len(db((db.person.name=='Max')&(db.person.birth<'2003-01-01')).select())
    1
    >>> len(db((db.person.name=='Max')&(db.person.birth<datetime.date(2003,01,01))).select())
    1
    >>> len(db((db.person.name=='Max')|(db.person.birth<'2003-01-01')).select())
    1
    >>> me = db(db.person.id==person_id).select(db.person.name)[0]
    >>> me.name
    'Max'

    Examples of search conditions using extract from date/datetime/time

    >>> len(db(db.person.birth.month()==12).select())
    1
    >>> len(db(db.person.birth.year()>1900).select())
    1

    Example of usage of NULL

    >>> len(db(db.person.birth==None).select()) ### test NULL
    0
    >>> len(db(db.person.birth!=None).select()) ### test NULL
    1

    Examples of search conditions using lower, upper, and like

    >>> len(db(db.person.name.upper()=='MAX').select())
    1
    >>> len(db(db.person.name.like('%ax')).select())
    1
    >>> len(db(db.person.name.upper().like('%AX')).select())
    1
    >>> len(db(~db.person.name.upper().like('%AX')).select())
    0

    orderby, groupby and limitby

    >>> people = db().select(db.person.name, orderby=db.person.name)
    >>> order = db.person.name|~db.person.birth
    >>> people = db().select(db.person.name, orderby=order)

    >>> people = db().select(db.person.name, orderby=db.person.name, groupby=db.person.name)

    >>> people = db().select(db.person.name, orderby=order, limitby=(0,100))

    Example of one 2 many relation

    >>> tmp = db.define_table('dog',\
        Field('name'),\
        Field('birth','date'),\
        Field('owner',db.person),\
        migrate='test_dog.table')
    >>> db.dog.insert(name='Snoopy', birth=None, owner=person_id)
    1

    A simple JOIN

    >>> len(db(db.dog.owner==db.person.id).select())
    1

    >>> len(db().select(db.person.ALL, db.dog.name,left=db.dog.on(db.dog.owner==db.person.id)))
    1

    Drop tables

    >>> db.dog.drop()
    >>> db.person.drop()

    Example of many 2 many relation and Set

    >>> tmp = db.define_table('author', Field('name'),\
        migrate='test_author.table')
    >>> tmp = db.define_table('paper', Field('title'),\
        migrate='test_paper.table')
    >>> tmp = db.define_table('authorship',\
        Field('author_id', db.author),\
        Field('paper_id', db.paper),\
        migrate='test_authorship.table')
    >>> aid = db.author.insert(name='Massimo')
    >>> pid = db.paper.insert(title='QCD')
    >>> tmp = db.authorship.insert(author_id=aid, paper_id=pid)

    Define a Set

    >>> authored_papers = db((db.author.id==db.authorship.author_id)&(db.paper.id==db.authorship.paper_id))
    >>> rows = authored_papers.select(db.author.name, db.paper.title)
    >>> for row in rows: print row.author.name, row.paper.title
    Massimo QCD

    Example of search condition using belongs

    >>> set = (1, 2, 3)
    >>> rows = db(db.paper.id.belongs(set)).select(db.paper.ALL)
    >>> print rows[0].title
    QCD

    Example of search condition using nested select

    >>> nested_select = db()._select(db.authorship.paper_id)
    >>> rows = db(db.paper.id.belongs(nested_select)).select(db.paper.ALL)
    >>> print rows[0].title
    QCD

    Example of expressions

    >>> mynumber = db.define_table('mynumber', Field('x', 'integer'))
    >>> db(mynumber).delete()
    0
    >>> for i in range(10): tmp = mynumber.insert(x=i)
    >>> db(mynumber).select(mynumber.x.sum())[0](mynumber.x.sum())
    45

    >>> db(mynumber.x+2==5).select(mynumber.x + 2)[0](mynumber.x + 2)
    5

    Output in csv

    >>> print str(authored_papers.select(db.author.name, db.paper.title)).strip()
    author.name,paper.title\r
    Massimo,QCD

    Delete all leftover tables

    >>> DAL.distributed_transaction_commit(db)

    >>> db.mynumber.drop()
    >>> db.authorship.drop()
    >>> db.author.drop()
    >>> db.paper.drop()
    """
10903 ################################################################################ 10904 # deprecated since the new DAL; here only for backward compatibility 10905 ################################################################################ 10906 10907 SQLField = Field 10908 SQLTable = Table 10909 SQLXorable = Expression 10910 SQLQuery = Query 10911 SQLSet = Set 10912 SQLRows = Rows 10913 SQLStorage = Row 10914 SQLDB = DAL 10915 GQLDB = DAL 10916 DAL.Field = Field # was necessary in gluon/globals.py session.connect 10917 DAL.Table = Table # was necessary in gluon/globals.py session.connect
################################################################################
# Geodal utils
################################################################################

def geoPoint(x,y):
    """Serialize the coordinates (x, y) as an OGC WKT POINT string."""
    coords = "%f %f" % (x, y)
    return "POINT (%s)" % coords
10925
def geoLine(*line):
    """Serialize a sequence of (x, y) pairs as an OGC WKT LINESTRING string."""
    pairs = ["%f %f" % point for point in line]
    return "LINESTRING (%s)" % ','.join(pairs)
10928
def geoPolygon(*line):
    """Serialize a sequence of (x, y) pairs as an OGC WKT POLYGON string
    (single ring; the caller is responsible for closing the ring)."""
    pairs = ["%f %f" % point for point in line]
    return "POLYGON ((%s))" % ','.join(pairs)
10931 10932 ################################################################################ 10933 # run tests 10934 ################################################################################ 10935 10936 if __name__ == '__main__': 10937 import doctest 10938 doctest.testmod() 10939